| """PMC Open Access Subset.""" |
|
|
| import datetime |
|
|
| import pandas as pd |
|
|
| import datasets |
| from datasets.tasks import LanguageModeling |
|
|
|
|
| |
| |
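

# Usage sketch (hedged): assuming this script is published on the Hugging Face Hub
# under a path such as "pmc/open_access" (the path is an assumption, not defined in
# this file), the dataset could be loaded with:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("pmc/open_access", "commercial", split="train")
#     print(ds[0]["accession_id"], ds[0]["license"])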


_CITATION = """\
PMC Open Access Subset [Internet]. Bethesda (MD): National Library of Medicine. 2003 - .
Available from: https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/
"""

_DESCRIPTION = """\
The PMC Open Access Subset includes more than 3.4 million journal articles and preprints that are made available under
license terms that allow reuse.

Not all articles in PMC are available for text mining and other reuse; many have copyright protection. However,
articles in the PMC Open Access Subset are made available under Creative Commons or similar licenses that generally
allow more liberal redistribution and reuse than a traditional copyrighted work.

The PMC Open Access Subset is one part of the PMC Article Datasets.
"""

_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/pmc/tools/openftlist/"

_LICENSE = ""
_URL = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/{subset}/txt/"
_SUBSETS = {
    "commercial": "oa_comm",
    "non_commercial": "oa_noncomm",
    "other": "oa_other",
}
_BASELINE_DATE = "2021-12-17"
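
# Illustrative sketch of how the constants above combine: for the "commercial"
# subset, `_split_generators` below builds baseline archive URLs of the form
#     https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/txt/oa_comm_txt.PMC000xxxxxx.baseline.2021-12-17.tar.gz
# (whether a given file actually exists on the FTP server is not guaranteed here).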


class OpenAccessConfig(datasets.BuilderConfig):
    """BuilderConfig for the PMC Open Access Subset."""

    def __init__(self, subsets=None, **kwargs):
        """BuilderConfig for the PMC Open Access Subset.

        Args:
            subsets (:obj:`List[str]`): List of subsets/groups to load.
            **kwargs: Keyword arguments forwarded to super.
        """
        subsets = [subsets] if isinstance(subsets, str) else subsets
        super().__init__(name="+".join(subsets), **kwargs)
        self.subsets = subsets if self.name != "all" else list(_SUBSETS.keys())
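

# Naming sketch for the config above: OpenAccessConfig(subsets=["commercial", "other"])
# produces a config named "commercial+other" that loads those two subsets, while
# OpenAccessConfig(subsets="all") expands to every key of _SUBSETS.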


class OpenAccess(datasets.GeneratorBasedBuilder):
    """PMC Open Access Subset."""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = OpenAccessConfig
    BUILDER_CONFIGS = [OpenAccessConfig(subsets="all")] + [OpenAccessConfig(subsets=subset) for subset in _SUBSETS]
    DEFAULT_CONFIG_NAME = "all"
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "pmid": datasets.Value("string"),
                    "accession_id": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "last_updated": datasets.Value("string"),
                    "retracted": datasets.Value("string"),
                    "citation": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[LanguageModeling(text_column="text")],
        )
    def _split_generators(self, dl_manager):
        incremental_paths = {
            "incremental_file_lists": [],
            "incremental_archives": [],
        }
        baseline_file_lists = []
        baseline_archives = []

        for subset in self.config.subsets:
            url = _URL.format(subset=_SUBSETS[subset])
            basename = f"{_SUBSETS[subset]}_txt."

            # Baseline packages: one file list + archive per PMC ID range; ranges
            # that do not exist for a given subset are skipped.
            baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in range(9)]

            for baseline in baselines:
                baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
                try:
                    baseline_file_list = dl_manager.download(baseline_file_list_url)
                except FileNotFoundError:
                    continue
                baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
                try:
                    baseline_archive = dl_manager.download(baseline_archive_url)
                except FileNotFoundError:
                    continue
                baseline_file_lists.append(baseline_file_list)
                baseline_archives.append(baseline_archive)

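            # Sketch of the incremental schedule: daily update packages follow the
            # baseline, so with _BASELINE_DATE = "2021-12-17" and a (hypothetical)
            # today() of 2021-12-20, the dates computed below are
            # ["2021-12-18", "2021-12-19", "2021-12-20"].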
            date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
            incremental_dates = [
                (datetime.date.fromisoformat(_BASELINE_DATE) + datetime.timedelta(days=i + 1)).isoformat()
                for i in range(date_delta.days)
            ]
            incrementals = [f"incr.{date}" for date in incremental_dates]
            incremental_urls = {
                "incremental_file_lists": [
                    f"{url}{basename}{incremental}.filelist.csv" for incremental in incrementals
                ],
                "incremental_archives": [f"{url}{basename}{incremental}.tar.gz" for incremental in incrementals],
            }
            paths = dl_manager.download(incremental_urls)
            incremental_paths["incremental_file_lists"].extend(paths["incremental_file_lists"])
            incremental_paths["incremental_archives"].extend(paths["incremental_archives"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "baseline_file_lists": baseline_file_lists,
                    "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
                    "incremental_file_lists": incremental_paths["incremental_file_lists"],
                    "incremental_archives": [
                        dl_manager.iter_archive(archive) for archive in incremental_paths["incremental_archives"]
                    ],
                },
            ),
        ]
    def _generate_examples(self, baseline_file_lists, baseline_archives, incremental_file_lists, incremental_archives):
        key = 0

        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
            try:
                baselines = pd.read_csv(baseline_file_list, index_col="Article File").to_dict(orient="index")
                for path, file in baseline_archive:
                    data = baselines.pop(path)
                    content = file.read()
                    # Fall back to latin-1 for files that are not valid UTF-8.
                    try:
                        text = content.decode("utf-8").strip()
                    except UnicodeDecodeError:
                        text = content.decode("latin-1").strip()
                    data = {
                        "text": text,
                        "pmid": data["PMID"],
                        "accession_id": data["AccessionID"],
                        "license": data["License"],
                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                        "retracted": data["Retracted"],
                        "citation": data["Article Citation"],
                    }
                    yield key, data
                    key += 1
            except FileNotFoundError:
                continue

        if incremental_file_lists:
            for incremental_file_list, incremental_archive in zip(incremental_file_lists, incremental_archives):
                incrementals = pd.read_csv(incremental_file_list, index_col="Article File").to_dict(orient="index")
                for path, file in incremental_archive:
                    data = incrementals.pop(path)
                    content = file.read()
                    # Fall back to latin-1 for files that are not valid UTF-8.
                    try:
                        text = content.decode("utf-8").strip()
                    except UnicodeDecodeError:
                        text = content.decode("latin-1").strip()
                    data = {
                        "text": text,
                        "pmid": data["PMID"],
                        "accession_id": data["AccessionID"],
                        "license": data["License"],
                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
                        "retracted": data["Retracted"],
                        "citation": data["Article Citation"],
                    }
                    yield key, data
                    key += 1
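

# Hypothetical local check (assumes this file is saved as "open_access.py" in the
# working directory; network access to the NCBI FTP server is required):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("./open_access.py", "non_commercial", split="train")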