"""Leading and Trailing Silences Removed Large Nepali ASR Dataset""" |

import csv
import os

import datasets
from datasets.tasks import AutomaticSpeechRecognition

_DESCRIPTION = """\
This dataset contains transcribed audio data for Nepali. It consists of FLAC files and a TSV file. The file utt_spk_text.tsv contains a FileID, an anonymized UserID, and the transcription of the audio in the file.
The dataset has been manually quality checked, but there might still be errors.
The audio files are sampled at a rate of 16 kHz, and leading and trailing silences are trimmed using torchaudio's voice activity detection.
"""
|
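# A minimal, illustrative sketch of the silence trimming described above, done
# with torchaudio's voice activity detection. It is NOT used by this loading
# script (the published audio is already trimmed), and the helper name and the
# default VAD parameters are assumptions. torchaudio.functional.vad only trims
# from the front, so it is applied to the signal and to its time-reversed copy.
def _trim_silence_sketch(path):
    # Local imports keep torch/torchaudio optional for users of this script.
    import torch
    import torchaudio

    waveform, sample_rate = torchaudio.load(path)
    trimmed = torchaudio.functional.vad(waveform, sample_rate)
    trimmed = torchaudio.functional.vad(torch.flip(trimmed, dims=[-1]), sample_rate)
    return torch.flip(trimmed, dims=[-1])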
|
_URL = "https://huggingface.co/datasets/SumitMdhr/ASR/resolve/main/"
_URLS = {
    "zipfile": _URL + "CLEAN_DATA.zip",
    "index_file": _URL + "metadata1.tsv",
}
|
|
class ASR_NEPALI(datasets.GeneratorBasedBuilder):
    """Leading and Trailing Silences Removed Large Nepali ASR Dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "utterance_id": datasets.Value("string"),
                    "transcription": datasets.Value("string"),
                    # The audio is 16 kHz FLAC, per the dataset description.
                    "utterance": datasets.Audio(sampling_rate=16_000),
                }
            ),
            homepage="https://www.openslr.org/54/",
            # Expose the audio and transcription columns as an
            # automatic-speech-recognition task.
            task_templates=[
                AutomaticSpeechRecognition(
                    audio_column="utterance",
                    transcription_column="transcription",
                )
            ],
        )
|
    def _split_generators(self, dl_manager):
        # Download the TSV index and the zip archive of FLAC files; the archive
        # is extracted so that examples can be read from local paths.
        index_file = dl_manager.download(_URLS["index_file"])
        audio_paths = dl_manager.download_and_extract(_URLS["zipfile"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "index_file": index_file,
                    "audio_paths": audio_paths,
                },
            ),
        ]
|
    def _generate_examples(self, index_file, audio_paths):
        # The TSV index is expected to have a header with "utterance_id" and
        # "transcription" columns; the utterance id is also the name of the
        # corresponding FLAC file inside the extracted CLEAN_DATA/ directory.
        with open(index_file, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t")
            for key, row in enumerate(reader):
                path = os.path.join(audio_paths, "CLEAN_DATA", row["utterance_id"])
                yield key, {
                    "utterance_id": row["utterance_id"],
                    "utterance": path,
                    "transcription": row["transcription"],
                }
|
|
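# Illustrative usage sketch: the repo id "SumitMdhr/ASR" is inferred from _URL
# above and is an assumption; adjust it if the dataset is hosted elsewhere.
if __name__ == "__main__":
    # Running this file directly downloads the train split and prints one row.
    nepali_asr = datasets.load_dataset("SumitMdhr/ASR", split="train")
    print(nepali_asr[0]["utterance_id"], nepali_asr[0]["transcription"])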