"""Hugging Face `datasets` loading script for the Czech Parliament Plenary Hearings corpus."""

import os

import datasets
from datasets import GeneratorBasedBuilder
_LICENSE = "https://creativecommons.org/licenses/by/4.0/"
_HOMEPAGE = "https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-3126"
_DATASET_URL = "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3126/snemovna.tar.xz"
_DESCRIPTION = (
    "Large corpus of Czech parliament plenary sessions, originally released 2019-11-29 "
    "by Kratochvíl Jonáš, Polák Peter and Bojar Ondřej. "
    "The dataset consists of 444 hours of transcribed speech audio snippets 1 to 40 seconds long. "
    "Original dataset transcriptions were converted to true case from uppercase using the spaCy library."
)
# Raw string so that BibTeX escapes such as \v and \' are not interpreted as Python escape sequences.
_CITATION = r"""@misc{11234/1-3126,
    title = {Large Corpus of Czech Parliament Plenary Hearings},
    author = {Kratochv{\'{\i}}l, Jon{\'a}{\v s} and Pol{\'a}k, Peter and Bojar, Ond{\v r}ej},
    url = {http://hdl.handle.net/11234/1-3126},
    note = {{LINDAT}/{CLARIAH}-{CZ} digital library at the Institute of Formal and Applied Linguistics ({{\'U}FAL}), Faculty of Mathematics and Physics, Charles University},
    copyright = {Creative Commons - Attribution 4.0 International ({CC} {BY} 4.0)},
    year = {2019}
}
"""
class CzechParliamentPlenaryHearings(GeneratorBasedBuilder):
    """Builder for the Czech Parliament Plenary Hearings speech recognition corpus."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16000),
                    "transcription": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )
    def _split_generators(self, dl_manager):
        # download_and_extract handles the .tar.xz archive; the audio and transcriptions live under ASR_DATA/.
        data_dir = dl_manager.download_and_extract(_DATASET_URL)
        data_dir = os.path.join(data_dir, "ASR_DATA")
        splits = ("train", "dev", "test")
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_names.get(split, split),
                gen_kwargs={"split": split, "data_dir": data_dir},
            )
            for split in splits
        ]
    def _generate_examples(self, split, data_dir):
        split_dir = os.path.join(data_dir, split)
        for folder_name in sorted(os.listdir(split_dir)):
            folder_path = os.path.join(split_dir, folder_name)
            if not os.path.isdir(folder_path):
                continue
            for audio_file in sorted(os.listdir(folder_path)):
                if not audio_file.endswith(".wav"):
                    continue
                audio_path = os.path.join(folder_path, audio_file)
                # The dev split stores transcriptions as <name>.txt; the other splits as <name>.wav.trn.
                if split == "dev":
                    transcription_path = os.path.join(folder_path, audio_file[:-4] + ".txt")
                else:
                    transcription_path = os.path.join(folder_path, audio_file + ".trn")
                with open(transcription_path, encoding="utf-8") as f:
                    transcription = f.read().strip()
                example_id = f"{folder_name}/{audio_file}"
                # Yield the file path; the Audio feature decodes and resamples to 16 kHz lazily,
                # so the whole corpus does not need to be loaded into memory here.
                yield example_id, {
                    "id": example_id,
                    "audio": audio_path,
                    "transcription": transcription,
                }
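

# A minimal usage sketch (an assumption, not part of the original script): with a `datasets`
# version that still supports script-based builders, pointing `load_dataset` at this file
# (assumed to be saved as czech_parliament_plenary_hearings.py) runs the builder above.
if __name__ == "__main__":
    ds = datasets.load_dataset("czech_parliament_plenary_hearings.py", split="train")
    first = ds[0]
    print(first["id"], first["transcription"])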