import json

import datasets
from datasets.tasks import QuestionAnsweringExtractive


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@article{2016arXiv160605250R,
       author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
                 Konstantin and {Liang}, Percy},
        title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
      journal = {arXiv e-prints},
         year = 2016,
          eid = {arXiv:1606.05250},
        pages = {arXiv:1606.05250},
archivePrefix = {arXiv},
       eprint = {1606.05250},
}
"""

_DESCRIPTION = """\
Stanford Question Answering Dataset (SQuAD) is a reading comprehension \
dataset, consisting of questions posed by crowdworkers on a set of Wikipedia \
articles, where the answer to every question is a segment of text, or span, \
from the corresponding reading passage, or the question might be unanswerable.
"""

_URLS = {
    "train": "https://huggingface.co/datasets/AIMClab/ChinaOpen/resolve/main/ChinaOpen-1k.zip"
}

class ChinaOpenConfig(datasets.BuilderConfig):
    """BuilderConfig for ChinaOpen."""

    def __init__(self, **kwargs):
        """BuilderConfig for ChinaOpen.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(ChinaOpenConfig, self).__init__(**kwargs)

class ChinaOpen(datasets.GeneratorBasedBuilder):
    """ChinaOpen: builder for the ChinaOpen-1k release, exposed with SQuAD-style extractive QA features."""

    BUILDER_CONFIGS = [
        ChinaOpenConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="question", context_column="context", answers_column="answers"
                )
            ],
        )

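    # Illustrative shape of a single record under the features declared above
    # (values are made up for illustration, not taken from the actual data):
    # {
    #     "id": "0",
    #     "title": "...",
    #     "context": "...",
    #     "question": "...",
    #     "answers": {"text": ["..."], "answer_start": [0]},
    # }
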
    def _split_generators(self, dl_manager):
        # download_and_extract returns a dict keyed like _URLS; for a .zip it
        # yields the path of the extraction directory.
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form, read from a SQuAD-style JSON file."""
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            squad = json.load(f)
            for article in squad["data"]:
                title = article.get("title", "")
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"]  # do not strip leading blank spaces GH-2585
                    for qa in paragraph["qas"]:
                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"] for answer in qa["answers"]]
                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for the ease of future expansions.
                        yield key, {
                            "title": title,
                            "context": context,
                            "question": qa["question"],
                            "id": qa["id"],
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                            },
                        }
                        key += 1
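

# A minimal usage sketch, assuming this script is saved locally; the
# "./ChinaOpen.py" path below is an assumption, not something stated in the
# original script. Recent versions of `datasets` may also require
# trust_remote_code=True to run a local loading script.
if __name__ == "__main__":
    from datasets import load_dataset

    # Builds the "plain_text" config defined above and materializes the train split.
    ds = load_dataset("./ChinaOpen.py", "plain_text", split="train")
    print(ds[0])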