File size: 3,533 Bytes
db8bd28
099c6e5
db8bd28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
099c6e5
db8bd28
 
 
099c6e5
 
 
 
 
 
 
 
 
 
 
 
 
 
db8bd28
 
 
099c6e5
 
 
 
db8bd28
 
 
099c6e5
 
 
 
db8bd28
 
 
099c6e5
db8bd28
 
 
099c6e5
 
 
db8bd28
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import json
import os
import datasets

# BibTeX entry for the EpaDB paper (Interspeech 2019); surfaced via
# `datasets.DatasetInfo(citation=...)` in `Epadb._info`.
_CITATION = """\
@article{vidal2019epadb,
  title={EpaDB: a database for development of pronunciation assessment systems},
  author={Vidal, Jazmin and Ferrer, Luciana and Brambilla, Leonardo},
  journal={Proc. Interspeech},
  pages={589--593},
  year={2019}
}
"""

# One-line human-readable summary; surfaced via
# `datasets.DatasetInfo(description=...)` in `Epadb._info`.
_DESCRIPTION = """\
EPADB contains curated pronunciation assessment data collected from Spanish-speaking learners of English.
"""

class Epadb(datasets.GeneratorBasedBuilder):
    """EPADB dataset loader.

    Reads per-split JSON manifests (``train.json`` / ``test.json``), downloads
    every audio file the manifests reference, and yields one example per
    manifest entry with the ``audio`` field rewritten to the local path.
    """

    def _info(self):
        """Return the dataset metadata: feature schema, description, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "utt_id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "sentence_id": datasets.Value("string"),
                "phone_ids": datasets.Sequence(datasets.Value("string")),
                "ref_phonemic_1": datasets.Sequence(datasets.Value("string")),
                "annot_1": datasets.Sequence(datasets.Value("string")),
                "lab_phonemic_1": datasets.Sequence(datasets.Value("string")),
                "error_type": datasets.Sequence(datasets.Value("string")),
                # Per-phone alignment times (seconds) from Montreal Forced Aligner.
                "start_mfa": datasets.Sequence(datasets.Value("float")),
                "end_mfa": datasets.Sequence(datasets.Value("float")),
                "global_1": datasets.Value("float"),
                "level_1": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "duration": datasets.Value("float"),
                "sample_rate": datasets.Value("int32"),
                "audio": datasets.Audio(sampling_rate=16000),
                "transcription": datasets.Value("string"),
            }),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download manifests and audio, then declare the train/test splits.

        Args:
            dl_manager: ``datasets.DownloadManager`` used to fetch files.

        Returns:
            A list with one ``SplitGenerator`` per split, whose ``gen_kwargs``
            carry the manifest path and an original-path -> local-path mapping
            for the audio files.
        """
        # Fetch the per-split JSON manifests first; they tell us which
        # audio files each split needs.
        train_path = dl_manager.download("train.json")
        test_path = dl_manager.download("test.json")

        # Fix: read the manifests with an explicit utf-8 encoding, consistent
        # with _generate_examples. Without it the platform default encoding is
        # used (e.g. cp1252 on Windows), which can mangle non-ASCII content.
        with open(train_path, encoding="utf-8") as f:
            train_data = json.load(f)
        with open(test_path, encoding="utf-8") as f:
            test_data = json.load(f)

        # Collect the audio paths referenced by each split, in manifest order.
        train_audio_files = [example["audio"] for example in train_data]
        test_audio_files = [example["audio"] for example in test_data]

        # Download all audio files; download() preserves list order, so the
        # zip below pairs each original path with its local copy.
        train_audio_paths = dl_manager.download(train_audio_files)
        test_audio_paths = dl_manager.download(test_audio_files)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    "audio_files": dict(zip(train_audio_files, train_audio_paths))
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_path,
                    "audio_files": dict(zip(test_audio_files, test_audio_paths))
                },
            ),
        ]

    def _generate_examples(self, filepath, audio_files):
        """Yield ``(index, example)`` pairs for one split.

        Args:
            filepath: Local path of the split's JSON manifest.
            audio_files: Mapping from the manifest's original audio path to
                the locally downloaded path.
        """
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for idx, example in enumerate(data):
                # Replace the manifest's audio path with the downloaded local
                # path so datasets.Audio can decode it.
                audio_path = example["audio"]
                example["audio"] = audio_files[audio_path]
                yield idx, example