| | import json |
| | import os |
| | import datasets |
| |
|
# BibTeX entry for the EpaDB paper (Interspeech 2019); surfaced via DatasetInfo.citation.
_CITATION = """\
@article{vidal2019epadb,
  title={EpaDB: a database for development of pronunciation assessment systems},
  author={Vidal, Jazmin and Ferrer, Luciana and Brambilla, Leonardo},
  journal={Proc. Interspeech},
  pages={589--593},
  year={2019}
}
"""
| |
|
# Short human-readable summary; surfaced via DatasetInfo.description on the dataset card.
_DESCRIPTION = """\
EPADB contains curated pronunciation assessment data collected from Spanish-speaking learners of English.
"""
| |
|
class Epadb(datasets.GeneratorBasedBuilder):
    """EPADB dataset builder: pronunciation assessment data from Spanish-speaking
    learners of English, with per-phone annotations, MFA alignments, and audio."""

    def _info(self):
        """Return the dataset metadata: feature schema, description, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "utt_id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "sentence_id": datasets.Value("string"),
                "phone_ids": datasets.Sequence(datasets.Value("string")),
                "ref_phonemic_1": datasets.Sequence(datasets.Value("string")),
                "annot_1": datasets.Sequence(datasets.Value("string")),
                "lab_phonemic_1": datasets.Sequence(datasets.Value("string")),
                "error_type": datasets.Sequence(datasets.Value("string")),
                # Per-phone start/end times from the MFA forced alignment.
                "start_mfa": datasets.Sequence(datasets.Value("float")),
                "end_mfa": datasets.Sequence(datasets.Value("float")),
                "global_1": datasets.Value("float"),
                "level_1": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "duration": datasets.Value("float"),
                "sample_rate": datasets.Value("int32"),
                "audio": datasets.Audio(sampling_rate=16000),
                "transcription": datasets.Value("string"),
            }),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the split manifests and all referenced audio files, then
        define the train/test splits.

        The manifests are parsed here only to learn which audio files must be
        fetched; _generate_examples re-reads the manifest when producing rows.
        """
        train_path = dl_manager.download("train.json")
        test_path = dl_manager.download("test.json")

        # Use an explicit encoding so parsing does not depend on the platform
        # default (matches _generate_examples).
        with open(train_path, encoding="utf-8") as f:
            train_data = json.load(f)
        with open(test_path, encoding="utf-8") as f:
            test_data = json.load(f)

        train_audio_files = [example["audio"] for example in train_data]
        test_audio_files = [example["audio"] for example in test_data]

        train_audio_paths = dl_manager.download(train_audio_files)
        test_audio_paths = dl_manager.download(test_audio_files)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    # Map each manifest audio reference to its local download path.
                    "audio_files": dict(zip(train_audio_files, train_audio_paths)),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_path,
                    "audio_files": dict(zip(test_audio_files, test_audio_paths)),
                },
            ),
        ]

    def _generate_examples(self, filepath, audio_files):
        """Yield (index, example) pairs from the manifest at *filepath*.

        Each example's "audio" field is rewritten from the manifest's reference
        to the locally downloaded path supplied in *audio_files*.
        """
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for idx, example in enumerate(data):
            # Replace the remote/relative audio reference with the local path.
            example["audio"] = audio_files[example["audio"]]
            yield idx, example