fj11 committed on
Commit 513ae28 · 1 Parent(s): cccf4f6

Convert dataset to Parquet (#1)

- Convert dataset to Parquet (a4990a48408216f00f0f49dc8ea984a576359100)
- Delete loading script auxiliary file (e5d46afa133aaaabd89370f60a4eae077210e5d2)
- Delete loading script auxiliary file (628cfa90134d8179017de36a34c2d34e200d3a19)
- Delete loading script (eb0327e64eb8fee7edcb514252b55844ad7ab804)
- Delete data file (c6eb7446b39be87a134d2785b794b26c56068460)
- Delete data file (43af9b7ab852b4ad3b96dfc7cf05b7ebd2650380)
- Delete data file (219858cba27fefea8cd66ff004c160cd441d7d5f)
- Delete data file (8184f948199a4bc464c403439fa62c95de6105f9)
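With the data converted to Parquet, the dataset should now load through the plain `datasets` API, without executing the deleted loading script. A minimal sketch, assuming current `datasets` behavior; the repo id, config name, split name, and column names are all taken from this diff:

```python
from datasets import load_dataset

# Parquet-backed loading; the deleted ScreenTalk-XS.py script is no longer needed.
ds = load_dataset("DataLabX/ScreenTalk-XS", "en", split="xs")

print(ds)                 # 10,000 examples with `audio` and `sentence` columns
print(ds[0]["sentence"])  # transcript of the first clip
```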

Files changed (6)
  1. README.md +19 -0
  2. ScreenTalk-XS.py +0 -167
  3. languages.py +0 -1
  4. n_shards.json +0 -5
  5. release_stats.py +0 -17
  6. transcript/en/xs.tsv +0 -0
README.md CHANGED
@@ -6,6 +6,25 @@ language:
 - en
 size_categories:
 - 1K<n<10K
+dataset_info:
+  config_name: en
+  features:
+  - name: audio
+    dtype: audio
+  - name: sentence
+    dtype: string
+  splits:
+  - name: xs
+    num_bytes: 653140044.0
+    num_examples: 10000
+  download_size: 647516502
+  dataset_size: 653140044.0
+configs:
+- config_name: en
+  data_files:
+  - split: xs
+    path: en/xs-*
+  default: true
 ---
 
 # 🎬 ScreenTalk-XS: Sample Speech Dataset from Screen Content 🖥️
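The new card metadata reports a download size of about 647 MB, so streaming is a cheap way to inspect a few examples without pulling everything; a sketch under the same assumptions as above:

```python
from datasets import load_dataset

# Stream the Parquet shards instead of downloading ~647 MB up front.
ds = load_dataset("DataLabX/ScreenTalk-XS", "en", split="xs", streaming=True)

sample = next(iter(ds))
print(sample["sentence"])
print(sample["audio"]["sampling_rate"])  # `datasets` decodes audio to array + rate
```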
ScreenTalk-XS.py DELETED
@@ -1,167 +0,0 @@
- import os
- import json
- import csv
- import datasets
- from datasets.utils.py_utils import size_str
- from tqdm import tqdm
-
- from .languages import LANGUAGES
- from .release_stats import STATS
-
- _CITATION = """\
- @inproceedings{Trans,
-     title={fj11: A Massively Speech Corpus},
-     author={fj11},
-     year={2025}
- }
- """
-
- _HOMEPAGE = "https://huggingface.co/datasets/DataLabX"
-
- _LICENSE = "CC-BY-SA-4.0"
-
- _BASE_URL = "https://huggingface.co/datasets/DataLabX/ScreenTalk-XS/resolve/main/"
-
- _AUDIO_URL = _BASE_URL + "audio/{lang}/{split}/{lang}_{split}_{shard_idx}.tar"
-
- _TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"
-
- _N_SHARDS_URL = _BASE_URL + "n_shards.json"
-
-
- def is_valid_token(token):
-     return True
-
-
- class TransConfig(datasets.BuilderConfig):
-     """BuilderConfig for TransConfig."""
-     def __init__(self, name, version, **kwargs):
-         self.language = kwargs.pop("language", None)
-         self.release_date = kwargs.pop("release_date", None)
-         self.total_hr = kwargs.pop("total_hr", None)
-         self.validated_hr = kwargs.pop("validated_hr", None)
-         self.num_clips = kwargs.pop("num_clips", None)
-         self.size_bytes = kwargs.pop("size_bytes", None)
-         self.size_human = size_str(self.size_bytes)
-         description = (
-             f"This dataset consists of transcribed speech data from TV series and movies across various genres, "
-             f"including action, drama, sci-fi, and romance. It was released on {self.release_date} and contains "
-             f"{self.total_hr} hours of transcribed speech data. "
-             f"The dataset includes {self.num_clips} audio clips in {self.language}, with a total size of {self.size_human}, "
-             f"and is designed for automatic speech recognition (ASR) model training and fine-tuning, providing diverse and "
-             f"natural conversational speech from real-world entertainment media."
-         )
-
-         super(TransConfig, self).__init__(
-             name=name,
-             version=version,
-             description=description,
-             **kwargs
-         )
-
-
- class ScreenTalk(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-     BUILDER_CONFIGS = [
-         TransConfig(
-             name=lang,
-             version=STATS["version"],
-             language=LANGUAGES[lang],
-             release_date=STATS["date"],
-             num_clips=lang_stats["clips"],
-             total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
-             size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
-         )
-         for lang, lang_stats in STATS["locales"].items()
-     ]
-
-     def _info(self):
-         total_languages = len(STATS["locales"])
-         total_valid_hours = STATS["totalValidHrs"]
-         description = (
-             f"This dataset consists of {total_valid_hours} hours of validated speech data "
-             f"in {total_languages} languages, with more content being continuously added. "
-             "It is designed for training and fine-tuning automatic speech recognition (ASR) models, "
-             "providing a diverse and realistic representation of spoken language."
-         )
-         features = datasets.Features(
-             {
-                 "audio": datasets.Audio(),  # use datasets.Audio() instead of a string path
-                 "sentence": datasets.Value("string"),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=description,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-             version=self.config.version,
-         )
-
-     def _split_generators(self, dl_manager):
-
-         lang = self.config.name
-         n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
-         with open(n_shards_path, encoding="utf-8") as f:
-             n_shards = json.load(f)
-
-         user_token = dl_manager.download_config.token
-         has_valid_token = is_valid_token(user_token)
-
-         audio_urls = {}
-         splits = ["xs"]
-
-         for split in splits:
-             audio_urls[split] = [
-                 _AUDIO_URL.format(lang=lang, split=split, shard_idx=i) for i in range(n_shards[lang][split])
-             ]
-         archive_paths = dl_manager.download(audio_urls)
-
-         local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
-         meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
-         meta_paths = dl_manager.download_and_extract(meta_urls)
-
-         split_generators = []
-         split_names = {
-             "xs": datasets.Split("xs"),
-         }
-
-         for split in splits:
-             split_generators.append(
-                 datasets.SplitGenerator(
-                     name=split_names.get(split, split),
-                     gen_kwargs={
-                         "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
-                         "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
-                         "meta_path": meta_paths[split],
-                     },
-                 ),
-             )
-
-         return split_generators
-
-     def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
-
-         metadata = {}
-         with open(meta_path, encoding="utf-8") as f:
-             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-             for row in tqdm(reader, desc="Reading metadata..."):
-                 if not isinstance(row, dict):
-                     continue
-                 path = row.get("audio")
-                 _, filename = os.path.split(path)
-                 metadata[filename] = row
-
-         for i, audio_archive in enumerate(archives):
-             for path, file in audio_archive:
-                 _, filename = os.path.split(path)
-                 if filename in metadata:
-                     result = dict(metadata[filename])
-                     # set the audio feature and the path to the extracted file
-                     path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
-                     # print("path: ", path)
-                     result["audio"] = path
-                     # result["path"] = path
-                     yield path, result
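For reference, the deleted script paired tar shards with the TSV manifest by audio filename. A stdlib-only sketch of that matching step, assuming local copies of the files the script used to download; the shard count of 2 comes from n_shards.json below, and the `sentence` column name is inferred from the declared features rather than from the (unrendered) TSV itself:

```python
import csv
import os
import tarfile

# Rebuild the filename -> metadata row index that _generate_examples used.
metadata = {}
with open("transcript/en/xs.tsv", encoding="utf-8") as f:
    reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
    for row in reader:
        metadata[os.path.basename(row["audio"])] = row

# Walk both audio shards and pair each member with its transcript row.
for shard_idx in range(2):
    with tarfile.open(f"audio/en/xs/en_xs_{shard_idx}.tar") as tar:
        for member in tar.getmembers():
            row = metadata.get(os.path.basename(member.name))
            if member.isfile() and row is not None:
                print(member.name, row["sentence"])  # column name assumed
```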
 
languages.py DELETED
@@ -1 +0,0 @@
- LANGUAGES = {"en": "English",}
 
 
n_shards.json DELETED
@@ -1,5 +0,0 @@
- {
-     "en": {
-         "xs": 2
-     }
- }
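This shard count is what drove the audio URL expansion in the deleted `_split_generators`; the expansion reduces to a small list comprehension:

```python
import json

# Shard-URL expansion as the deleted loading script performed it.
_AUDIO_URL = (
    "https://huggingface.co/datasets/DataLabX/ScreenTalk-XS/resolve/main/"
    "audio/{lang}/{split}/{lang}_{split}_{shard_idx}.tar"
)

n_shards = json.loads('{"en": {"xs": 2}}')  # contents of the deleted n_shards.json
urls = [
    _AUDIO_URL.format(lang="en", split="xs", shard_idx=i)
    for i in range(n_shards["en"]["xs"])
]
print(urls)  # ..._xs_0.tar and ..._xs_1.tar
```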
 
release_stats.py DELETED
@@ -1,17 +0,0 @@
- STATS = {
-     "version": "1.0.0",
-     "date": "2025-02-24",
-     "totalValidHrs": 0,
-     "locales": {
-         "en": {
-             "buckets": {
-                 "xs": 1,
-             },
-             "duration": 0,
-             "clips": 10,
-             "totalHrs": 0,
-             "size": 10
-
-         }
-     }
- }
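These values fed the `BUILDER_CONFIGS` comprehension in the deleted script: one `TransConfig` per locale, with `size_str` formatting the byte count. A tiny sketch of that derivation, assuming `size_str` is still importable from `datasets.utils.py_utils` as the script did:

```python
from datasets.utils.py_utils import size_str

# Placeholder stats from the deleted release_stats.py.
STATS = {
    "version": "1.0.0",
    "date": "2025-02-24",
    "totalValidHrs": 0,
    "locales": {"en": {"clips": 10, "totalHrs": 0, "size": 10}},
}

for lang, lang_stats in STATS["locales"].items():
    total_hr = float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None
    size_bytes = int(lang_stats["size"]) if lang_stats["size"] else None
    print(lang, lang_stats["clips"], total_hr, size_str(size_bytes))
```

Note the falsy-zero pitfall the original inherits: `totalHrs: 0` maps to `None` under these conditionals, so a genuinely zero-hour entry is indistinguishable from a missing one.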
 
transcript/en/xs.tsv DELETED
The diff for this file is too large to render. See raw diff