diarray commited on
Commit
973fe3d
·
1 Parent(s): c1246c0

Add custom dataset loading script

Browse files
Files changed (1) hide show
  1. bam-asr-all.py +195 -0
bam-asr-all.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Copyright 2025 RobotsMali AI4D Lab.
3
+
4
+ Licensed under the Creative Commons Attribution 4.0 International License (the "License");
5
+ you may not use this file except in compliance with the License.
6
+ You may obtain a copy of the License at
7
+
8
+ https://creativecommons.org/licenses/by/4.0/
9
+
10
+ Unless required by applicable law or agreed to in writing, software
11
+ distributed under the License is distributed on an "AS IS" BASIS,
12
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ See the License for the specific language governing permissions and
14
+ limitations under the License.
15
+ """
16
+
17
+ import csv
18
+ import datasets
19
+ from datasets import Split, SplitGenerator
20
+
21
# -----------------------
# 1. Basic meta-infos
# -----------------------
# BibTeX entry surfaced on the Hub dataset card via DatasetInfo.citation.
_CITATION = """\
@inproceedings{bam_asr_all_2025,
title={Bam-ASR-All Audio Dataset},
author={RobotsMali AI4D Lab},
year={2025},
publisher={Hugging Face}
}
"""

# Markdown description shown on the dataset card; lists the three subsets
# that the builder configs below expose individually or combined.
_DESCRIPTION = """
The **Bam-ASR-All** dataset is a combined Bambara speech dataset featuring multiple subsets:
- Oza-Mali-Pense
- Jeli-ASR
- RT-Data-Collection
All subsets contain audio samples in Bambara along with transcriptions and (potentially)
French translations.
"""

_HOMEPAGE = "https://huggingface.co/datasets/RobotsMali/bam-asr-all"
_LICENSE = "CC-BY-4.0"
# Shared by every BuilderConfig so all subsets version together.
_VERSION = datasets.Version("1.0.0")

# Base URL for raw-file access on the Hub; metadata.csv and each audio
# file path from the CSV are appended to it with an explicit "/".
# NOTE: No trailing slash here
_BASE_URL = "https://huggingface.co/datasets/RobotsMali/bam-asr-all/resolve/main"
48
+
49
+ # -----------------------
50
+ # 2. Config + Builder
51
+ # -----------------------
52
class BamASRAllConfig(datasets.BuilderConfig):
    """BuilderConfig for different subsets of Bam-ASR-All dataset.

    Adds no fields beyond ``datasets.BuilderConfig``; it exists so each
    subset (oza-mali-pense, jeli-asr, rt-data-collection, bam-asr-all)
    can be declared with its own name/version/description.
    """
54
+
55
class BamASRAll(datasets.GeneratorBasedBuilder):
    """
    Builder for the Bam-ASR-All dataset.

    Reads metadata.csv from the Hub, filters its rows by subset (the
    builder-config name) and by split (train/test path segment), downloads
    the referenced audio files, and yields examples with the features
    declared in ``_info`` (audio, duration, bam, french).
    """

    # 2a. Define your subsets (configs)
    BUILDER_CONFIGS = [
        BamASRAllConfig(
            name="oza-mali-pense",
            version=_VERSION,
            description="Load only the Oza-Mali-Pense subset (files under oza-mali-pense/).",
        ),
        BamASRAllConfig(
            name="jeli-asr",
            version=_VERSION,
            description="Load only the Jeli-ASR subset (files under jeli-asr/).",
        ),
        BamASRAllConfig(
            name="rt-data-collection",
            version=_VERSION,
            description="Load only the RT-Data-Collection subset (files under rt-data-collection/).",
        ),
        # The "combined" option for everything can also be done
        BamASRAllConfig(
            name="bam-asr-all",  # The dataset's default name
            version=_VERSION,
            description="Combine oza-mali-pense, jeli-asr, and rt-data-collection (all rows).",
        ),
    ]

    # 2b. Default subset name if none specified
    DEFAULT_CONFIG_NAME = "bam-asr-all"

    def _info(self):
        """Return the DatasetInfo: features, homepage, license, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "duration": datasets.Value("float32"),
                    "bam": datasets.Value("string"),
                    "french": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    # -----------------------
    # 3. Splits
    # -----------------------
    def _split_generators(self, dl_manager):
        """
        Download 'metadata.csv' from the Hub once, then declare the TRAIN
        and TEST splits. The actual row filtering (by '/train/' vs '/test/'
        in the file paths) happens in ``_generate_examples``.
        """
        metadata_url = f"{_BASE_URL}/metadata.csv"
        local_metadata_path = dl_manager.download(metadata_url)

        # Both splits share the same cached CSV; only the 'split' kwarg differs.
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "metadata_path": local_metadata_path,
                    "split": "train",
                    "dl_manager": dl_manager,
                },
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={
                    "metadata_path": local_metadata_path,
                    "split": "test",
                    "dl_manager": dl_manager,
                },
            ),
        ]

    # -----------------------
    # 4. Generate examples
    # -----------------------
    def _generate_examples(self, metadata_path, split, dl_manager):
        """
        Read metadata.csv row-by-row, filter by:
          - the config name (oza-mali-pense, jeli-asr, rt-data-collection, or all)
          - 'train' vs 'test' in file path
        Then download each audio file from the Hub and yield local path + metadata.

        Args:
            metadata_path: local path of the downloaded metadata.csv.
            split: "train" or "test" (as set in gen_kwargs).
            dl_manager: the DownloadManager, reused here to fetch audio files.
        """
        # Every subset stores its files under "<config-name>/...", so the
        # config name itself is the path marker; the combined config keeps all.
        if self.config.name == "bam-asr-all":
            subset_marker = None
        else:
            subset_marker = f"{self.config.name}/"
        split_marker = f"/{split}/"

        # Parallel lists keep a strict one-to-one pairing between each audio
        # URL and its metadata. (A dict keyed by URL would silently collapse
        # duplicate file_name rows while still queueing duplicate downloads,
        # mispairing audio with metadata.)
        audio_urls = []
        metadata_rows = []

        with open(metadata_path, "r", encoding="utf-8") as f:
            for row in csv.DictReader(f):
                file_path = row["file_name"]  # e.g. "jeli-asr/train/.../some.wav"

                # Filter by config name (substring match, as paths are relative).
                if subset_marker is not None and subset_marker not in file_path:
                    continue
                # Filter by split (train/test path segment).
                if split_marker not in file_path:
                    continue

                # Build the raw URL for this audio file.
                audio_urls.append(f"{_BASE_URL}/{file_path}")
                metadata_rows.append(
                    {
                        "duration": float(row["duration"]),
                        "bam": row["bam"],
                        "french": row["french"],
                    }
                )

        # Download all audios in one batch; the returned local cache paths
        # are in the same order as the requested URLs.
        local_audio_paths = dl_manager.download(audio_urls)
        for idx, (local_path, meta) in enumerate(zip(local_audio_paths, metadata_rows)):
            yield idx, {
                "audio": local_path,  # local path for datasets.Audio
                "duration": meta["duration"],
                "bam": meta["bam"],
                "french": meta["french"],
            }