|
import os |
|
import csv |
|
import datasets |
|
|
|
# BibTeX entry for the corpus paper (Kjartansson et al., SLTU 2018).
# Surfaced verbatim through `datasets.DatasetInfo.citation`.
_CITATION = """\
@inproceedings{kjartansson-etal-sltu2018,
    title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
    author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
    booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
    year = {2018},
    address = {Gurugram, India},
    month = aug,
    pages = {52--55},
    URL = {http://dx.doi.org/10.21437/SLTU.2018-11}
}
"""

# Human-readable dataset summary shown on the Hub dataset card.
_DESCRIPTION = """\
This data set contains transcribed audio data for Nepali. The data set consists of flac files, and a TSV file. The file utt_spk_text.tsv contains a FileID, anonymized UserID and the transcription of audio in the file.
The data set has been manually quality checked, but there might still be errors.
The audio files are sampled at rate of 16KHz, and leading and trailing silences are trimmed using torchaudio's voice activity detection.
"""

# Upstream home page of the corpus (OpenSLR resource 54).
_HOMEPAGE = "https://www.openslr.org/54/"

# License tag; the "license:" prefix matches the Hub metadata convention used here.
_LICENSE = "license:cc-by-sa-4.0"
|
|
|
_URLS = { |
|
'cleaned': { |
|
"index_file": "https://huggingface.co/datasets/spktsagar/openslr-nepali-asr-cleaned/resolve/main/data/utt_spk_text_clean.tsv", |
|
"zipfiles": [ |
|
f"https://huggingface.co/datasets/spktsagar/openslr-nepali-asr-cleaned/resolve/main/data/asr_nepali_{k}.zip" |
|
for k in [*range(10), *'abcdef'] |
|
], |
|
}, |
|
'original': { |
|
"index_file": "https://huggingface.co/datasets/spktsagar/openslr-nepali-asr-cleaned/resolve/main/data/utt_spk_text_orig.tsv", |
|
"zipfiles": [ |
|
f"https://www.openslr.org/resources/54/asr_nepali_{k}.zip" |
|
for k in [*range(10), *'abcdef'] |
|
], |
|
}, |
|
} |
|
|
|
|
|
class OpenslrNepaliAsrCleaned(datasets.GeneratorBasedBuilder):
    """Builder for the OpenSLR Large Nepali ASR dataset (SLR54).

    Two configs are exposed: "original" (the untouched OpenSLR release) and
    "cleaned" (silence-trimmed, re-hosted copies). Both yield the same schema:
    utterance id, speaker id, audio, transcription, and frame count.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="original", version=VERSION, description="All original utterances, speaker id and transcription from Openslr Large Nepali ASR Dataset"),
        datasets.BuilderConfig(name="cleaned", version=VERSION, description="All cleaned utterances, speaker id and transcription from Openslr Large Nepali ASR Dataset"),
    ]

    DEFAULT_CONFIG_NAME = "original"

    def _info(self):
        """Return dataset metadata: feature schema, citation, license, and ASR task template."""
        features = datasets.Features(
            {
                "utterance_id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                # Audio is decoded lazily from the .flac path yielded below.
                "utterance": datasets.Audio(sampling_rate=16000),
                "transcription": datasets.Value("string"),
                "num_frames": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[datasets.tasks.AutomaticSpeechRecognition(
                audio_column="utterance", transcription_column="transcription"
            )]
        )

    def _split_generators(self, dl_manager):
        """Download the index TSV and all 16 zip shards, extract them, and
        return a single TRAIN split.

        Returns:
            A one-element list with a ``SplitGenerator`` whose kwargs carry the
            local index-file path and a mapping from shard key (the character
            before ".zip" in each shard URL, i.e. 0-9/a-f) to its extraction
            directory.
        """
        urls = _URLS[self.config.name]
        index_file = dl_manager.download(urls["index_file"])

        # Download shards in batches of 4 rather than all at once
        # (presumably to bound concurrent transfers — TODO confirm intent).
        zip_paths = []
        for start in range(0, len(urls["zipfiles"]), 4):
            zip_paths.extend(dl_manager.download(urls["zipfiles"][start:start + 4]))

        # Key each extracted directory by its shard character so that
        # _generate_examples can route an utterance id to the right shard.
        shard_keys = [url[-5] for url in urls["zipfiles"]]
        audio_paths = dict(zip(shard_keys, dl_manager.extract(zip_paths)))

        # Delete the archives once extracted to halve peak disk usage.
        # NOTE: this defeats dl_manager's download cache on a re-run.
        for path in zip_paths:
            if os.path.exists(path):
                os.remove(path)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "index_file": index_file,
                    "audio_paths": audio_paths,
                },
            ),
        ]

    def _generate_examples(self, index_file, audio_paths):
        """Yield (key, example) pairs by joining the TSV index with audio paths.

        Args:
            index_file: Local path to the tab-separated index whose columns
                include utterance_id, speaker_id, transcription, num_frames.
            audio_paths: Mapping from shard key (first character of an
                utterance id) to the shard's extraction directory.
        """
        # Shards of the "cleaned" config contain one extra top-level
        # "cleaned/" directory; everything below it matches the original layout
        # asr_nepali/data/<first two chars of id>/<id>.flac.
        if self.config.name == 'cleaned':
            subdirs = ('cleaned', 'asr_nepali', 'data')
        else:
            subdirs = ('asr_nepali', 'data')

        with open(index_file, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter='\t')
            for key, row in enumerate(reader):
                utterance_id = row['utterance_id']
                path = os.path.join(
                    audio_paths[utterance_id[0]], *subdirs,
                    utterance_id[:2], f"{utterance_id}.flac"
                )
                yield key, {
                    "utterance_id": utterance_id,
                    "speaker_id": row['speaker_id'],
                    "utterance": path,
                    "transcription": row['transcription'],
                    "num_frames": int(row['num_frames']),
                }
|
|