| """mC4 dataset based on Common Crawl.""" |
|
|
|
|
| import gzip |
| import json |
| import warnings |
|
|
| import datasets |
|
|
|
|
| logger = datasets.logging.get_logger(__name__) |
|
|
|
|
| _DESCRIPTION = """\ |
| A colossal, cleaned version of Common Crawl's web crawl corpus. |
| |
| Based on Common Crawl dataset: "https://commoncrawl.org". |
| |
| This is the processed version of Google's mC4 dataset by AllenAI. |
| """ |
|
|
| _CITATION = """ |
| @article{2019t5, |
| author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu}, |
| title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer}, |
| journal = {arXiv e-prints}, |
| year = {2019}, |
| archivePrefix = {arXiv}, |
| eprint = {1910.10683}, |
| } |
| """ |
|
|
| _URL = "https://github.com/allenai/allennlp/discussions/5056" |
|
|
| |
| _DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/main/multilingual/c4-{language}.tfrecord-{index:05d}-of-{n_shards:05d}.json.gz" |
|
|
|
|
|
|
| _LANGUAGES = ["fr", "en"] |
|
|
| _N_SHARDS_PER_SPLIT = { |
| "en": {"train": 11264, "validation": 128}, |
| "fr": {"train": 2048, "validation": 16}, |
| } |
|
|
| _N_SHARDS_PER_SPLIT_CUSTOMISED = { |
| "en": {"train": 2, "validation": 10}, |
| "fr": {"train": 2, "validation": 10}, |
| } |
|
|
|
|
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
|
|
|
|
class Mc4Config(datasets.BuilderConfig):
    """BuilderConfig for the mC4 dataset."""

    def __init__(self, *args, languages, **kwargs):
        """Create a config whose name is derived from its language list.

        Args:
            languages (:obj:`List[str]`): list of languages to load.
            **kwargs: keyword arguments forwarded to super.
        """
        # The config name is the "+"-joined language list, e.g. "fr+en".
        derived_name = "+".join(languages)
        super().__init__(*args, name=derived_name, **kwargs)
        self.languages = languages
|
|
|
|
class Mc4(datasets.GeneratorBasedBuilder):
    """mC4, a colossal, cleaned version of Common Crawl's web crawl corpus."""

    BUILDER_CONFIGS = [Mc4Config(languages=[lang]) for lang in _LANGUAGES]
    BUILDER_CONFIG_CLASS = Mc4Config

    def _info(self):
        """Return dataset metadata: three string features (text, timestamp, url)."""
        warnings.warn(
            "Dataset 'mc4' is deprecated and will be deleted. Use 'allenai/c4' instead.",
            FutureWarning,
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Build shard URLs per split, download them, and return split generators.

        Only the first ``_N_SHARDS_PER_SPLIT_CUSTOMISED[lang][split]`` shards
        are downloaded, while ``n_shards`` in the URL still uses the full
        upstream count from ``_N_SHARDS_PER_SPLIT`` so the file names match
        what is actually published.
        """
        data_urls = {}
        for split in ["train", "validation"]:
            data_urls[split] = [
                _DATA_URL.format(
                    language=lang,
                    # Validation files carry a "-validation" suffix upstream.
                    split_suffix="-validation" if split == "validation" else "",
                    index=index,
                    n_shards=_N_SHARDS_PER_SPLIT[lang][split],
                )
                for lang in self.config.languages
                for index in range(_N_SHARDS_PER_SPLIT_CUSTOMISED[lang][split])
            ]
        train_downloaded_files = dl_manager.download(data_urls["train"])
        validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
            ),
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            # FIX: open the gzip file by path instead of wrapping a separately
            # opened file object — closing a GzipFile does NOT close a fileobj
            # that was passed in, so gzip.open(open(filepath, "rb"), ...)
            # leaked one file handle per shard.
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        # Each shard is JSON-lines; one JSON object per line.
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
|
|