yunusskeete committed
Commit 475a432 · verified · 1 Parent(s): 0dc52a6

Create librispeech_asr-timestamped.py

Files changed (1):
  1. librispeech_asr-timestamped.py +330 -0
librispeech_asr-timestamped.py ADDED
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Librispeech automatic speech recognition dataset."""
import csv
import os

import datasets
# from datasets.tasks import AutomaticSpeechRecognition

from huggingface_hub import list_repo_files


import pyarrow.parquet as pq
import pyarrow as pa


_CITATION = """\
@inproceedings{panayotov2015librispeech,
  title={Librispeech: an ASR corpus based on public domain audio books},
  author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
  booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
  pages={5206--5210},
  year={2015},
  organization={IEEE}
}
"""

_DESCRIPTION = """\
LibriSpeech is a corpus of approximately 1000 hours of read English speech with a sampling rate of 16 kHz,
prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
audiobooks from the LibriVox project, and has been carefully segmented and aligned.
"""

_URL = "http://www.openslr.org/12"
_TRANSCRIPT_URL = "https://huggingface.co/datasets/distil-whisper/whisper_transcriptions_greedy_timestamped/resolve/main/librispeech_asr/"

_DATA_REPO_ID = "sanchit-gandhi/librispeech-data"

_TRANSCRIPT_URLS = {
    "clean": {
        "dev": _TRANSCRIPT_URL + "validation-clean-transcription.csv",
        "test": _TRANSCRIPT_URL + "test-clean-transcription.csv",
        "train.100": _TRANSCRIPT_URL + "train-clean-100-transcription.csv",
        "train.360": _TRANSCRIPT_URL + "train-clean-360-transcription.csv",
    },
    "other": {
        "test": _TRANSCRIPT_URL + "test-other-transcription.csv",
        "dev": _TRANSCRIPT_URL + "validation-other-transcription.csv",
        "train.500": _TRANSCRIPT_URL + "train-other-500-transcription.csv",
    },
    "all": {
        "dev.clean": _TRANSCRIPT_URL + "validation-clean-transcription.csv",
        "dev.other": _TRANSCRIPT_URL + "validation-other-transcription.csv",
        "test.clean": _TRANSCRIPT_URL + "test-clean-transcription.csv",
        "test.other": _TRANSCRIPT_URL + "test-other-transcription.csv",
        "train.clean.100": _TRANSCRIPT_URL + "train-clean-100-transcription.csv",
        "train.clean.360": _TRANSCRIPT_URL + "train-clean-360-transcription.csv",
        "train.other.500": _TRANSCRIPT_URL + "train-other-500-transcription.csv",
    },
}


class LibrispeechASRConfig(datasets.BuilderConfig):
    """BuilderConfig for LibriSpeechASR."""

    def __init__(self, **kwargs):
        """
        Args:
          data_dir: `string`, the path to the folder containing the files in the
            downloaded .tar
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
          **kwargs: keyword arguments forwarded to super.
        """
        super(LibrispeechASRConfig, self).__init__(version=datasets.Version("2.1.0", ""), **kwargs)


class LibriSpeechASR(datasets.ArrowBasedBuilder):
    """Librispeech dataset."""

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        LibrispeechASRConfig(name="clean", description="'Clean' speech."),
        LibrispeechASRConfig(name="other", description="'Other', more challenging, speech."),
        LibrispeechASRConfig(name="all", description="Combined clean and other dataset."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker_id": datasets.Value("int64"),
                    "chapter_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                    "whisper_transcript": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_URL,
            citation=_CITATION,
            # task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
            task_templates=[],
        )

    def _split_generators(self, dl_manager):
        data_repo_download = f"https://huggingface.co/datasets/{_DATA_REPO_ID}/resolve/main/"
        all_files = list_repo_files(_DATA_REPO_ID, repo_type="dataset")

        # Group the parquet shards in the data repo by their split prefix.
        train_clean_100_files = [file for file in all_files if file.startswith("data/train.clean.100")]
        train_clean_360_files = [file for file in all_files if file.startswith("data/train.clean.360")]
        train_other_500_files = [file for file in all_files if file.startswith("data/train.other.500")]
        validation_clean_files = [file for file in all_files if file.startswith("data/validation.clean")]
        validation_other_files = [file for file in all_files if file.startswith("data/validation.other")]
        test_clean_files = [file for file in all_files if file.startswith("data/test.clean")]
        test_other_files = [file for file in all_files if file.startswith("data/test.other")]

        split_to_ids = {
            "train.clean.100": train_clean_100_files,
            "train.clean.360": train_clean_360_files,
            "train.other.500": train_other_500_files,
            "dev.clean": validation_clean_files,
            "dev.other": validation_other_files,
            "test.clean": test_clean_files,
            "test.other": test_other_files,
        }

        dl_urls = {}
        for split, split_ids in split_to_ids.items():
            dl_urls[split] = [data_repo_download + source_id for source_id in split_ids]
        # archive_paths is keyed by the full split names used in `split_to_ids`.
        archive_paths = dl_manager.download(dl_urls)

        local_extracted_archive_paths = (
            dl_manager.extract(archive_paths)
            if not dl_manager.is_streaming
            else {split: [None] * len(archive_paths[split]) for split in split_to_ids}
        )

        transcript_archive_path = dl_manager.download(_TRANSCRIPT_URLS[self.config.name])
        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local transcription files:
        # local_extracted_transcript_archive = dl_manager.extract(transcript_archive_path) if not dl_manager.is_streaming else {}

        if self.config.name == "clean":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.100",
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("train.clean.100"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["train.clean.100"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("train.100"),
                        "transcript_files": transcript_archive_path["train.100"],
                    },
                ),
                datasets.SplitGenerator(
                    name="train.360",
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("train.clean.360"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["train.clean.360"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("train.360"),
                        "transcript_files": transcript_archive_path["train.360"],
                    },
                ),
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("dev.clean"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["dev.clean"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("dev"),
                        "transcript_files": transcript_archive_path["dev"],
                    },
                )
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("test.clean"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["test.clean"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("test"),
                        "transcript_files": transcript_archive_path["test"],
                    },
                )
            ]
        elif self.config.name == "other":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.500",
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("train.other.500"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["train.other.500"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("train.500"),
                        "transcript_files": transcript_archive_path["train.500"],
                    },
                )
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("dev.other"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["dev.other"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("dev"),
                        "transcript_files": transcript_archive_path["dev"],
                    },
                )
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("test.other"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["test.other"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("test"),
                        "transcript_files": transcript_archive_path["test"],
                    },
                )
            ]
        elif self.config.name == "all":
            train_splits = [
                datasets.SplitGenerator(
                    name="train.clean.100",
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("train.clean.100"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["train.clean.100"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("train.clean.100"),
                        "transcript_files": transcript_archive_path["train.clean.100"],
                    },
                ),
                datasets.SplitGenerator(
                    name="train.clean.360",
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("train.clean.360"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["train.clean.360"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("train.clean.360"),
                        "transcript_files": transcript_archive_path["train.clean.360"],
                    },
                ),
                datasets.SplitGenerator(
                    name="train.other.500",
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("train.other.500"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["train.other.500"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("train.other.500"),
                        "transcript_files": transcript_archive_path["train.other.500"],
                    },
                ),
            ]
            dev_splits = [
                datasets.SplitGenerator(
                    name="validation.clean",
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("dev.clean"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["dev.clean"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("dev.clean"),
                        "transcript_files": transcript_archive_path["dev.clean"],
                    },
                ),
                datasets.SplitGenerator(
                    name="validation.other",
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("dev.other"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["dev.other"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("dev.other"),
                        "transcript_files": transcript_archive_path["dev.other"],
                    },
                ),
            ]
            test_splits = [
                datasets.SplitGenerator(
                    name="test.clean",
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("test.clean"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["test.clean"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("test.clean"),
                        "transcript_files": transcript_archive_path["test.clean"],
                    },
                ),
                datasets.SplitGenerator(
                    name="test.other",
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get("test.other"),
                        "archives": [dl_manager.iter_files(path) for path in archive_paths["test.other"]],
                        # "local_extracted_transcript_archive": local_extracted_transcript_archive.get("test.other"),
                        "transcript_files": transcript_archive_path["test.other"],
                    },
                ),
            ]

        return train_splits + dev_splits + test_splits

    def _generate_tables(self, local_extracted_archive_paths, archives, transcript_files):
        # Build a lookup from file id -> pseudo-labelled (timestamped) Whisper transcription.
        whisper_transcriptions = dict()
        with open(transcript_files, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter=",")
            for line in reader:
                whisper_transcriptions[line["file_id"]] = line["whisper_transcript"]

        idx = 0
        for local_extracted_archive_path, archive in zip(local_extracted_archive_paths, archives):
            # Here we iterate over all the parquet shards for the split:
            for audio_file in archive:
                with open(audio_file, "rb") as f:
                    pf = pq.ParquetFile(f)
                    for record_batch in pf.iter_batches():
                        pa_table = pa.Table.from_batches([record_batch])
                        whisper_transcript = [whisper_transcriptions.get(str(file_id), None) for file_id in pa_table["id"]]
                        whisper_transcript = pa.array(whisper_transcript, pa.string())
                        pa_table = pa_table.append_column("whisper_transcript", whisper_transcript)
                        yield idx, pa_table
                        idx += 1
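
Once the script is in place, the dataset can be consumed through the standard datasets API. The sketch below is illustrative only: the path passed to load_dataset (the local script file here; a Hub repo hosting the script would also work), the chosen config/split, and the trust_remote_code flag (needed on recent datasets versions for loading scripts) are assumptions rather than part of this commit.

from datasets import load_dataset

# Hypothetical usage of the loading script added in this commit.
librispeech = load_dataset(
    "librispeech_asr-timestamped.py",  # local path to this script (or the dataset repo that hosts it)
    "all",                             # config: "clean", "other", or "all"
    split="train.clean.100",
    streaming=True,                    # stream the parquet shards instead of downloading them all up front
    trust_remote_code=True,            # required for script-based datasets on recent `datasets` releases
)

sample = next(iter(librispeech))
print(sample["text"])                # original LibriSpeech transcription
print(sample["whisper_transcript"])  # timestamped Whisper pseudo-label joined in by _generate_tables

In streaming mode the script never extracts the shards locally; each parquet batch is yielded as an Arrow table with the whisper_transcript column appended on the fly.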