fj11 committed
Commit 613854c · 1 Parent(s): ec5d576

Create ScreenTalk-XS.py

Files changed (1)
  1. ScreenTalk-XS.py +167 -0
ScreenTalk-XS.py ADDED
@@ -0,0 +1,167 @@
+ import os
+ import json
+ import csv
+ import datasets
+ from datasets.utils.py_utils import size_str
+ from tqdm import tqdm
+
+ from .languages import LANGUAGES
+ from .release_stats import STATS
+
+ _CITATION = """\
+ @inproceedings{Trans,
+   title={fj11: A Massively Speech Corpus},
+   author={fj11},
+   year={2025}
+ }
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/DataLabX"
+
+ _LICENSE = "CC-BY-SA-4.0"
+
+ _BASE_URL = "https://huggingface.co/datasets/DataLabX/ScreenTalk-XS/resolve/main/"
+
+ _AUDIO_URL = _BASE_URL + "audio/{lang}/{split}/{lang}_{split}_{shard_idx}.tar"
+
+ _TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"
+
+ _N_SHARDS_URL = _BASE_URL + "n_shards.json"
+
+
+ def is_valid_token(token):
+     return True
+
+
+ class TransConfig(datasets.BuilderConfig):
+     """BuilderConfig for TransConfig."""
+     def __init__(self, name, version, **kwargs):
+         self.language = kwargs.pop("language", None)
+         self.release_date = kwargs.pop("release_date", None)
+         self.total_hr = kwargs.pop("total_hr", None)
+         self.validated_hr = kwargs.pop("validated_hr", None)
+         self.num_clips = kwargs.pop("num_clips", None)
+         self.size_bytes = kwargs.pop("size_bytes", None)
+         self.size_human = size_str(self.size_bytes)
+         description = (
+             f"This dataset consists of transcribed speech data from TV series and movies across various genres, "
+             f"including action, drama, sci-fi, and romance. It was released on {self.release_date} and contains "
+             f"{self.total_hr} hours of transcribed speech data. "
+             f"The dataset includes {self.num_clips} audio clips in {self.language}, with a total size of {self.size_human}, "
+             f"and is designed for automatic speech recognition (ASR) model training and fine-tuning, providing diverse and "
+             f"natural conversational speech from real-world entertainment media."
+         )
+
+         super(TransConfig, self).__init__(
+             name=name,
+             version=version,
+             description=description,
+             **kwargs
+         )
+
+
+ class ScreenTalk(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         TransConfig(
+             name=lang,
+             version=STATS["version"],
+             language=LANGUAGES[lang],
+             release_date=STATS["date"],
+             num_clips=lang_stats["clips"],
+             total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
+             size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
+         )
+         for lang, lang_stats in STATS["locales"].items()
+     ]
+
+     def _info(self):
+         total_languages = len(STATS["locales"])
+         total_valid_hours = STATS["totalValidHrs"]
+         description = (
+             f"This dataset consists of {total_valid_hours} hours of validated speech data "
+             f"in {total_languages} languages, with more content being continuously added. "
+             "It is designed for training and fine-tuning automatic speech recognition (ASR) models, "
+             "providing a diverse and realistic representation of spoken language."
+         )
+         features = datasets.Features(
+             {
+                 "audio": datasets.Audio(),  # Use datasets.Audio() instead of string
+                 "sentence": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=description,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+             version=self.config.version,
+         )
+
+     def _split_generators(self, dl_manager):
+
+         lang = self.config.name
+         n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
+         with open(n_shards_path, encoding="utf-8") as f:
+             n_shards = json.load(f)
+
+         user_token = dl_manager.download_config.token
+         has_valid_token = is_valid_token(user_token)
+
+         audio_urls = {}
+         splits = ["xs"]
+
+         for split in splits:
+             audio_urls[split] = [
+                 _AUDIO_URL.format(lang=lang, split=split, shard_idx=i) for i in range(n_shards[lang][split])
+             ]
+         archive_paths = dl_manager.download(audio_urls)
+
+         local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+         meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
+         meta_paths = dl_manager.download_and_extract(meta_urls)
+
+         split_generators = []
+         split_names = {
+             "xs": datasets.Split("xs"),
+         }
+
+         for split in splits:
+             split_generators.append(
+                 datasets.SplitGenerator(
+                     name=split_names.get(split, split),
+                     gen_kwargs={
+                         "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
+                         "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
+                         "meta_path": meta_paths[split],
+                     },
+                 ),
+             )
+
+         return split_generators
+
+     def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
+
+         metadata = {}
+         with open(meta_path, encoding="utf-8") as f:
+             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+             for row in tqdm(reader, desc="Reading metadata..."):
+                 if not isinstance(row, dict):
+                     continue
+                 path = row.get("audio")
+                 _, filename = os.path.split(path)
+                 metadata[filename] = row
+
+         for i, audio_archive in enumerate(archives):
+             for path, file in audio_archive:
+                 _, filename = os.path.split(path)
+                 if filename in metadata:
+                     result = dict(metadata[filename])
+                     # set the audio feature and the path to the extracted file
+                     path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
+                     # print("path: ", path)
+                     result["audio"] = path
+                     # result["path"] = path
+                     yield path, result
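
For context, the loader above derives everything it downloads from three URL templates (_AUDIO_URL, _TRANSCRIPT_URL, _N_SHARDS_URL) and exposes a single "xs" split per language config. A rough sketch of the layout it expects on the Hub, inferred from those templates; the "en" language code and the shard count are placeholders, not confirmed repository contents:

    audio/en/xs/en_xs_0.tar      # one tar shard per count listed under n_shards.json["en"]["xs"]
    transcript/en/xs.tsv         # tab-separated metadata with at least "audio" and "sentence" columns
    n_shards.json                # e.g. {"en": {"xs": 1}}

Given that layout, a minimal loading sketch with the Hugging Face datasets library. The config name "en" is hypothetical (the real names are the keys of STATS["locales"]), and depending on your installed datasets version a script-backed dataset may require trust_remote_code=True or may not be supported at all in the newest releases:

    from datasets import load_dataset

    # "en" is a placeholder config; pick one of the language codes defined in languages.py
    ds = load_dataset("DataLabX/ScreenTalk-XS", "en", split="xs", trust_remote_code=True)

    sample = ds[0]
    print(sample["sentence"])                # transcript text taken from the TSV row
    print(sample["audio"]["sampling_rate"])  # audio decoded on access by the Audio() feature

Because _generate_examples keys the metadata by file name, each audio file inside the tar shards must match the basename of the "audio" column in the corresponding TSV row, otherwise the clip is silently skipped.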