| """TODO(wiki_split): Add a description here.""" |
|
|
|
|
| import csv |
| import os |
|
|
| import datasets |
|
|
|
|
| |
| _CITATION = """\ |
| @InProceedings{BothaEtAl2018, |
| title = {{Learning To Split and Rephrase From Wikipedia Edit History}}, |
| author = {Botha, Jan A and Faruqui, Manaal and Alex, John and Baldridge, Jason and Das, Dipanjan}, |
| booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing}, |
| pages = {to appear}, |
| note = {arXiv preprint arXiv:1808.09468}, |
| year = {2018} |
| } |
| """ |


_DESCRIPTION = """\
One million English sentences, each split into two sentences that together preserve the original meaning, extracted from Wikipedia.
Google's WikiSplit dataset was constructed automatically from the publicly available Wikipedia revision history. Although
the dataset contains some inherent noise, it can serve as valuable training data for models that split or merge sentences.
"""


_URL = "https://github.com/google-research-datasets/wiki-split/raw/master/"
_URLS = {
    "train": _URL + "train.tsv.zip",
    "test": _URL + "test.tsv",
    "dev": _URL + "validation.tsv",
}
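
# Each TSV row is expected to hold the complex sentence in its first column and
# the two rewritten sentences, joined by the "<::::>" marker, in its second
# column; see _generate_examples below for the parsing.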


class WikiSplit(datasets.GeneratorBasedBuilder):
    """WikiSplit: split-and-rephrase sentence pairs mined from Wikipedia edit history."""

    VERSION = datasets.Version("0.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Each example pairs one original ("complex") sentence with the two
            # shorter sentences it was split into.
            features=datasets.Features(
                {
                    "complex_sentence": datasets.Value("string"),
                    "simple_sentence_1": datasets.Value("string"),
                    "simple_sentence_2": datasets.Value("string"),
                }
            ),
            # No default (input, label) pairing is defined for this dataset.
            supervised_keys=None,
            homepage="https://github.com/google-research-datasets/wiki-split",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = _URLS
        dl_dir = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # The train split ships as a zip archive, so the TSV file sits
                # inside the extracted directory.
                gen_kwargs={"filepath": os.path.join(dl_dir["train"], "train.tsv")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dl_dir["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dl_dir["dev"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            data = csv.reader(f, delimiter="\t")
            for id_, row in enumerate(data):
                # Column 0 holds the complex sentence; column 1 holds the two
                # simple sentences joined by the "<::::>" marker.
                simple_sentences = row[1].split("<::::>")
                yield id_, {
                    "complex_sentence": row[0],
                    "simple_sentence_1": simple_sentences[0],
                    "simple_sentence_2": simple_sentences[1],
                }