from collections import defaultdict
import os
import json
import csv
import datasets
_DESCRIPTION = """
A large-scale multilingual speech corpus for representation learning, semi-supervised learning and interpretation.
"""
_CITATION = """
@inproceedings{wang-etal-2021-voxpopuli,
title = "{V}ox{P}opuli: A Large-Scale Multilingual Speech Corpus for Representation Learning,
Semi-Supervised Learning and Interpretation",
author = "Wang, Changhan and
Riviere, Morgane and
Lee, Ann and
Wu, Anne and
Talnikar, Chaitanya and
Haziza, Daniel and
Williamson, Mary and
Pino, Juan and
Dupoux, Emmanuel",
booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics
and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
month = aug,
year = "2021",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.acl-long.80",
doi = "10.18653/v1/2021.acl-long.80",
pages = "993--1003",
}
"""
_HOMEPAGE = "https://github.com/facebookresearch/voxpopuli"
_LICENSE = "CC0, also see https://www.europarl.europa.eu/legal-notice/en/"
_ASR_LANGUAGES = [
"en", "de", "fr", "es", "pl", "it", "ro", "hu", "cs", "nl", "fi", "hr",
"sk", "sl", "et", "lt"
]
_ASR_ACCENTED_LANGUAGES = [
"en_accented"
]
_LANGUAGES = _ASR_LANGUAGES + _ASR_ACCENTED_LANGUAGES
_BASE_DATA_DIR = "https://huggingface.co/datasets/polinaeterna/voxpopuli/resolve/main/data/"
_N_SHARDS_FILE = _BASE_DATA_DIR + "n_files.json"
_AUDIO_ARCHIVE_PATH = _BASE_DATA_DIR + "{lang}/{split}/{split}_part_{n_shard}.tar.gz"
_METADATA_PATH = _BASE_DATA_DIR + "{lang}/asr_{split}.tsv"
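# For a concrete (illustrative) instance: lang="en", split="train", n_shard=0 gives
# ".../data/en/train/train_part_0.tar.gz" for the audio archive and
# ".../data/en/asr_train.tsv" for its transcript metadata.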
class VoxpopuliConfig(datasets.BuilderConfig):
"""BuilderConfig for VoxPopuli."""
def __init__(self, name, **kwargs):
"""
Args:
name: `string`, name of dataset config
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(name=name, **kwargs)
self.languages = _LANGUAGES if name == "all" else [name]
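        # e.g. VoxpopuliConfig(name="fr") yields languages == ["fr"], while
        # name="all" covers every entry in _LANGUAGES.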
class Voxpopuli(datasets.GeneratorBasedBuilder):
"""The VoxPopuli dataset."""
    VERSION = datasets.Version("1.3.0")
BUILDER_CONFIGS = [
VoxpopuliConfig(
name=name,
version=datasets.Version("1.3.0"),
)
for name in _LANGUAGES + ["all"]
]
    DEFAULT_WRITER_BATCH_SIZE = 256  # lower this value if dataset generation uses too much RAM
def _info(self):
features = datasets.Features(
{
"path": datasets.Value("string"),
"language": datasets.ClassLabel(names=_LANGUAGES),
"raw_text": datasets.Value("string"),
"normalized_text": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=16_000),
# "segment_id": datasets.Value("int16"), # TODO
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
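    # With the Audio feature above, a fully decoded example looks roughly like
    # (illustrative values): {"path": "...", "language": <class-label int>,
    # "raw_text": "...", "normalized_text": "...",
    # "audio": {"path": "...", "array": <float32 array>, "sampling_rate": 16000}}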
def _split_generators(self, dl_manager):
n_shards_path = dl_manager.download_and_extract(_N_SHARDS_FILE)
with open(n_shards_path) as f:
n_shards = json.load(f)
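        # n_files.json is expected to map language -> split -> shard count, e.g.
        # {"en": {"train": 28, "dev": 1, "test": 1}, ...} (illustrative counts).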
        audio_urls = defaultdict(dict)
        for split in ["train", "dev", "test"]:
            for lang in self.config.languages:
                audio_urls[split][lang] = [
                    _AUDIO_ARCHIVE_PATH.format(lang=lang, split=split, n_shard=i)
                    for i in range(n_shards[lang][split])
                ]
        meta_urls = defaultdict(dict)
        for split in ["train", "dev", "test"]:
            for lang in self.config.languages:
                meta_urls[split][lang] = _METADATA_PATH.format(lang=lang, split=split)
meta_paths = dl_manager.download_and_extract(meta_urls)
audio_paths = dl_manager.download(audio_urls)
        local_extracted_audio_paths = (
            dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
            # In streaming mode archives are read on the fly, so there are no local
            # extraction paths; per-language None placeholders keep the shape aligned
            # with the non-streaming mapping consumed in _generate_examples.
            {
                split: {lang: [None] * len(audio_paths[split][lang]) for lang in audio_paths[split]}
                for split in ["train", "dev", "test"]
            }
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "audio_archives": {
                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
                        for lang, lang_archives in audio_paths[split].items()
                    },
                    "local_extracted_audio_archives_paths": local_extracted_audio_paths[split],
                    "metadata_paths": meta_paths[split],
                },
            )
            for split, split_name in [
                ("train", datasets.Split.TRAIN),
                ("dev", datasets.Split.VALIDATION),
                ("test", datasets.Split.TEST),
            ]
        ]
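    # Each split's gen_kwargs therefore carries, per language, a list of archive
    # iterators plus (when not streaming) the matching local extraction paths.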
    def _generate_examples(self, audio_archives, local_extracted_audio_archives_paths, metadata_paths):
        assert len(metadata_paths) == len(audio_archives)
        for lang in self.config.languages:
            meta_path = metadata_paths[lang]
            with open(meta_path) as f:
                # Each asr_{split}.tsv row provides at least "id", "raw_text" and
                # "normalized_text" columns, keyed here by utterance id.
                metadata = {x["id"]: x for x in csv.DictReader(f, delimiter="\t")}
            for audio_archive, local_extracted_audio_archive_path in zip(
                audio_archives[lang], local_extracted_audio_archives_paths[lang]
            ):
                for audio_filename, audio_file in audio_archive:
                    # Tar members use "/" as separator regardless of the host OS.
                    audio_id = audio_filename.split("/")[-1].split(".wav")[0]
                    path = (
                        os.path.join(local_extracted_audio_archive_path, audio_filename)
                        if local_extracted_audio_archive_path
                        else audio_filename
                    )
                    yield audio_id, {
                        "path": path,
                        "language": lang,
                        "raw_text": metadata[audio_id]["raw_text"],
                        "normalized_text": metadata[audio_id]["normalized_text"],
                        "audio": {"path": path, "bytes": audio_file.read()},
                    }
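
# Minimal usage sketch (not part of the loading script itself). It assumes this
# file is saved locally as "voxpopuli.py" and that the `datasets` library is
# installed; the "hr" config and printed fields are illustrative.
if __name__ == "__main__":
    from datasets import load_dataset

    # Streaming iterates the tar shards on the fly instead of downloading and
    # extracting them up front.
    croatian_asr = load_dataset("voxpopuli.py", "hr", split="train", streaming=True)
    sample = next(iter(croatian_asr))
    print(sample["normalized_text"])
    print(sample["audio"]["sampling_rate"])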