kimetsu committed on
Commit 76dc6d0 · 1 Parent(s): e69964a

Upload timit_asr.py with huggingface_hub

Files changed (1)
  timit_asr.py +186 -0
timit_asr.py ADDED
@@ -0,0 +1,186 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """TIMIT automatic speech recognition dataset."""
+
+
+ import os
+ from pathlib import Path
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+
+ _CITATION = """\
+ @inproceedings{
+   title={TIMIT Acoustic-Phonetic Continuous Speech Corpus},
+   author={Garofolo, John S., et al},
+   ldc_catalog_no={LDC93S1},
+   DOI={https://doi.org/10.35111/17gk-bn40},
+   journal={Linguistic Data Consortium, Philadelphia},
+   year={1983}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The TIMIT corpus of read speech was developed to provide speech data for acoustic-phonetic research
+ and for the evaluation of automatic speech recognition systems.
+ TIMIT contains high-quality recordings of 630 speakers of 8 major dialects of American English,
+ each reading up to 10 phonetically rich sentences.
+ More information on the TIMIT dataset can be found in its README:
+ https://catalog.ldc.upenn.edu/docs/LDC93S1/readme.txt
+ """
+
+ _HOMEPAGE = "https://catalog.ldc.upenn.edu/LDC93S1"
+
+
+ class TimitASRConfig(datasets.BuilderConfig):
+     """BuilderConfig for TimitASR."""
+
+     def __init__(self, **kwargs):
+         """
+         Args:
+           data_dir: `string`, the path to the folder containing the files in the
+             downloaded .tar
+           citation: `string`, citation for the data set
+           url: `string`, url for information about the data set
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(TimitASRConfig, self).__init__(version=datasets.Version("2.0.1", ""), **kwargs)
+
+
+ class TimitASR(datasets.GeneratorBasedBuilder):
+     """TimitASR dataset."""
+
+     BUILDER_CONFIGS = [TimitASRConfig(name="clean", description="'Clean' speech.")]
+
+     @property
+     def manual_download_instructions(self):
+         return (
+             "To use TIMIT you have to download it manually. "
+             "Please create an account and download the dataset from https://catalog.ldc.upenn.edu/LDC93S1 \n"
+             "Then extract all files in one folder and load the dataset with: "
+             "`datasets.load_dataset('timit_asr', data_dir='path/to/folder/folder_name')`"
+         )
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "file": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "text": datasets.Value("string"),
+                     "phonetic_detail": datasets.Sequence(
+                         {
+                             "start": datasets.Value("int64"),
+                             "stop": datasets.Value("int64"),
+                             "utterance": datasets.Value("string"),
+                         }
+                     ),
+                     "word_detail": datasets.Sequence(
+                         {
+                             "start": datasets.Value("int64"),
+                             "stop": datasets.Value("int64"),
+                             "utterance": datasets.Value("string"),
+                         }
+                     ),
+                     "dialect_region": datasets.Value("string"),
+                     "sentence_type": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("file", "text"),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
+         )
+
+     def _split_generators(self, dl_manager):
+
+         data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
+
+         if not os.path.exists(data_dir):
+             raise FileNotFoundError(
+                 f"{data_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('timit_asr', data_dir=...)` that includes the files extracted from the TIMIT archive. Manual download instructions: {self.manual_download_instructions}"
+             )
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train", "data_dir": data_dir}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test", "data_dir": data_dir}),
+         ]
+
+     def _generate_examples(self, split, data_dir):
+         """Generate examples by globbing the extracted TIMIT directory for the given split."""
+         # Collect the wav files for this split, handling both lower-case and upper-case directory layouts.
+         wav_paths = sorted(Path(data_dir).glob(f"**/{split}/**/*.wav"))
+         wav_paths = wav_paths if wav_paths else sorted(Path(data_dir).glob(f"**/{split.upper()}/**/*.WAV"))
+         for key, wav_path in enumerate(wav_paths):
+
+             # extract transcript
+             txt_path = with_case_insensitive_suffix(wav_path, ".txt")
+             with txt_path.open(encoding="utf-8") as op:
+                 transcript = " ".join(op.readlines()[0].split()[2:])  # the first two items are the start/end sample indices
+
+             # extract phonemes
+             phn_path = with_case_insensitive_suffix(wav_path, ".phn")
+             with phn_path.open(encoding="utf-8") as op:
+                 phonemes = [
+                     {
+                         "start": i.split(" ")[0],
+                         "stop": i.split(" ")[1],
+                         "utterance": " ".join(i.split(" ")[2:]).strip(),
+                     }
+                     for i in op.readlines()
+                 ]
+
+             # extract words
+             wrd_path = with_case_insensitive_suffix(wav_path, ".wrd")
+             with wrd_path.open(encoding="utf-8") as op:
+                 words = [
+                     {
+                         "start": i.split(" ")[0],
+                         "stop": i.split(" ")[1],
+                         "utterance": " ".join(i.split(" ")[2:]).strip(),
+                     }
+                     for i in op.readlines()
+                 ]
+
+             dialect_region = wav_path.parents[1].name
+             sentence_type = wav_path.name[0:2]
+             speaker_id = wav_path.parents[0].name[1:]
+             id_ = wav_path.stem
+
+             example = {
+                 "file": str(wav_path),
+                 "audio": str(wav_path),
+                 "text": transcript,
+                 "phonetic_detail": phonemes,
+                 "word_detail": words,
+                 "dialect_region": dialect_region,
+                 "sentence_type": sentence_type,
+                 "speaker_id": speaker_id,
+                 "id": id_,
+             }
+
+             yield key, example
+
+
+ def with_case_insensitive_suffix(path: Path, suffix: str):
+     path = path.with_suffix(suffix.lower())  # try the lower-case companion file first (.txt, .phn, .wrd)
+     path = path if path.exists() else path.with_suffix(suffix.upper())  # fall back to the upper-case variant (.TXT, .PHN, .WRD)
+     return path
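
For anyone trying the uploaded script, here is a minimal usage sketch. It assumes a manually downloaded LDC copy of TIMIT and a `datasets` version that still supports loading-script datasets with a manual `data_dir`; the `/path/to/TIMIT` location is a placeholder, not part of this commit.

# Minimal usage sketch (not part of the upload): load the manually downloaded corpus with the script above.
from datasets import load_dataset

timit = load_dataset("timit_asr", data_dir="/path/to/TIMIT")  # builds the "train" and "test" splits

sample = timit["train"][0]
print(sample["text"])                                  # sentence-level transcript
print(sample["speaker_id"], sample["dialect_region"])  # speaker and dialect-region codes
print(sample["audio"]["sampling_rate"])                # 16000; decoding needs an audio backend such as soundfile
print(sample["word_detail"]["utterance"][:5])          # first few word-level alignment entries

Note that `phonetic_detail` and `word_detail` come back in columnar form (lists under `start`, `stop`, and `utterance`), matching the `datasets.Sequence` features declared in `_info`.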