import csv

import datasets
import requests

_CITATION = '''
'''


class Config(datasets.BuilderConfig):
    """One BuilderConfig per DISRPT corpus (e.g. "eng.rst.gum")."""
    citation = _CITATION
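
# Corpus identifiers in "<lang>.<framework>.<corpus>" form, as used in the
# DISRPT shared tasks.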
files = [
    "eng.dep.covdtb",
    "eng.dep.scidtb",
    "eng.pdtb.pdtb",
    "eng.pdtb.tedm",
    "eng.rst.gum",
    "eng.rst.rstdt",
    "eng.sdrt.stac",
    "deu.rst.pcc",
    "eus.rst.ert",
    "fas.rst.prstc",
    "fra.sdrt.annodis",
    "ita.pdtb.luna",
    "nld.rst.nldt",
    "por.pdtb.crpc",
    "por.pdtb.tedm",
    "por.rst.cstn",
    "rus.rst.rrt",
    "spa.rst.rststb",
    "spa.rst.sctb",
    "tha.pdtb.tdtb",
    "tur.pdtb.tdb",
    "tur.pdtb.tedm",
    "zho.dep.scidtb",
    "zho.pdtb.cdtb",
    "zho.rst.gcdt",
    "zho.rst.sctb",
]
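
# The reassignment below overrides the larger list above, keeping only the
# corpora available in the disrpt/sharedtask2021 repository that the URLs
# further down point at.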
files = [
    "deu.rst.pcc",
    "eng.pdtb.pdtb",
    "eng.rst.gum",
    "eng.rst.rstdt",
    "eng.sdrt.stac",
    "eus.rst.ert",
    "fas.rst.prstc",
    "fra.sdrt.annodis",
| "fra.sdrt.annodis bug fixes", | |
| "nld.rst.nldt", | |
| "por.rst.cstn", | |
| "rus.rst.rrt", | |
| "spa.rst.rststb", | |
| "spa.rst.sctb", | |
| "tur.pdtb.tdb", | |
| "zho.pdtb.cdtb", | |
| "zho.rst.sctb", | |
| ] | |
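
# Build a raw-file URL for every corpus/split pair, then probe each URL once
# and drop the combinations that are not present in the repository (HTTP 404),
# since not every corpus is guaranteed to ship all three splits.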
_URLs = {
    f"{task}-{split}": f"https://raw.githubusercontent.com/disrpt/sharedtask2021/main/data/{task}/{task}_{split}.rels"
    for task in files
    for split in "train dev test".split()
}
_URLs = {k: v for k, v in _URLs.items() if requests.get(v).status_code != 404}


class Dataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        Config(
            name=n,
            data_dir=n,
        )
        for n in files
    ]
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        data_file = dl_manager.download(_URLs)
        # Only emit a train split when the corpus actually has a train file
        # (URLs that returned 404 were already filtered out of _URLs above).
        train_key = self.config.data_dir + "-train"
        if train_key in data_file:
            train = [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file[train_key]})]
        else:
            train = []
        return train + [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_file[self.config.data_dir + "-dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_file[self.config.data_dir + "-test"]}),
        ]
    def _info(self):
        # Features are left unspecified and are inferred from the rows
        # yielded by _generate_examples.
        return datasets.DatasetInfo()
    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            # The .rels files are tab-separated with a header row, which
            # csv.DictReader already consumes as the field names, so every
            # remaining row is a data row.
            reader = csv.DictReader(f, delimiter="\t")
            for id_, row in enumerate(reader):
                yield id_, row
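

# --- Usage sketch (illustrative, not part of the loader) --------------------
# A minimal local smoke test, assuming this file is saved as a standalone
# loading script; "eng.rst.gum" is just one of the config names in `files`,
# and recent versions of `datasets` may additionally require
# trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, name="eng.rst.gum")
    print(ds)              # available splits and row counts
    print(ds["test"][0])   # one discourse relation as a dict of TSV columns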