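"""ScreenTalk: a `datasets` loading script for transcribed speech from TV series and movies.

Illustrative usage (a sketch, not verified against the hosted repo; "en" is an
assumed example locale and "xs" is the split defined in this script):

    from datasets import load_dataset

    ds = load_dataset("DataLabX/ScreenTalk-XS", "en", split="xs")
    print(ds[0]["sentence"])
"""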
import os
import json
import csv
import datasets
from datasets.utils.py_utils import size_str
from tqdm import tqdm

from .languages import LANGUAGES
from .release_stats import STATS
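
# `STATS` (from release_stats.py) is expected to expose the fields accessed below.
# A rough illustrative shape (field names taken from how they are used in this
# script; the values are made up):
#
#   STATS = {
#       "version": "1.0.0",
#       "date": "2025-01-01",
#       "totalValidHrs": 12.3,
#       "locales": {"en": {"clips": 1000, "totalHrs": "10.5", "size": "123456789"}},
#   }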

_CITATION = """\
@inproceedings{Trans,
  title={fj11: A Massive Speech Corpus},
  author={fj11},
  year={2025}
}
"""

_HOMEPAGE = "https://huggingface.co/datasets/DataLabX"

_LICENSE = "CC-BY-SA-4.0"

_BASE_URL = "https://huggingface.co/datasets/DataLabX/ScreenTalk-XS/resolve/main/"

_AUDIO_URL = _BASE_URL + "audio/{lang}/{split}/{lang}_{split}_{shard_idx}.tar"

_TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"

_N_SHARDS_URL = _BASE_URL + "n_shards.json"


def is_valid_token(token):
    # Placeholder check: every token is currently accepted without validation.
    return True


class TransConfig(datasets.BuilderConfig):
    """BuilderConfig for TransConfig."""
    def __init__(self, name, version, **kwargs):
        self.language = kwargs.pop("language", None)
        self.release_date = kwargs.pop("release_date", None)
        self.total_hr = kwargs.pop("total_hr", None)
        self.validated_hr = kwargs.pop("validated_hr", None)
        self.num_clips = kwargs.pop("num_clips", None)
        self.size_bytes = kwargs.pop("size_bytes", None)
        self.size_human = size_str(self.size_bytes)
        description = (
            f"This dataset consists of transcribed speech data from TV series and movies across various genres, "
            f"including action, drama, sci-fi, and romance. It was released on {self.release_date} and contains "
            f"{self.total_hr} hours of transcribed speech data. "
            f"The dataset includes {self.num_clips} audio clips in {self.language}, with a total size of {self.size_human}, "
            f"and is designed for automatic speech recognition (ASR) model training and fine-tuning, providing diverse and "
            f"natural conversational speech from real-world entertainment media."
        )

        super(TransConfig, self).__init__(
            name=name,
            version=version,
            description=description,
            **kwargs
        )


class ScreenTalk(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        TransConfig(
            name=lang,
            version=STATS["version"],
            language=LANGUAGES[lang],
            release_date=STATS["date"],
            num_clips=lang_stats["clips"],
            total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
            size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
        )
        for lang, lang_stats in STATS["locales"].items()
    ]

    def _info(self):
        total_languages = len(STATS["locales"])
        total_valid_hours = STATS["totalValidHrs"]
        description = (
            f"This dataset consists of {total_valid_hours} hours of validated speech data "
            f"in {total_languages} languages, with more content being continuously added. "
            "It is designed for training and fine-tuning automatic speech recognition (ASR) models, "
            "providing a diverse and realistic representation of spoken language."
        )
        features = datasets.Features(
            {
                "audio": datasets.Audio(),  # decoded on access as path, array, and sampling_rate
                "sentence": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):

        lang = self.config.name
        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
        with open(n_shards_path, encoding="utf-8") as f:
            n_shards = json.load(f)

        # Token handling is currently a no-op: is_valid_token() accepts any token.
        user_token = dl_manager.download_config.token
        has_valid_token = is_valid_token(user_token)

        audio_urls = {}
        splits = ["xs"]

        for split in splits:
            audio_urls[split] = [
                _AUDIO_URL.format(lang=lang, split=split, shard_idx=i) for i in range(n_shards[lang][split])
            ]
        archive_paths = dl_manager.download(audio_urls)
        
        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
        meta_paths = dl_manager.download_and_extract(meta_urls)

        split_generators = []
        split_names = {
            "xs": datasets.Split("xs"),
        }

        for split in splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=split_names.get(split, split),
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
                        "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
                        "meta_path": meta_paths[split],
                    },
                ),
            )

        return split_generators

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):

        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading metadata..."):
                if not isinstance(row, dict):
                    continue
                path = row.get("audio")
                if not path:
                    # skip rows without an audio path to avoid os.path.split(None)
                    continue
                _, filename = os.path.split(path)
                metadata[filename] = row

        for i, audio_archive in enumerate(archives):
            for path, file in audio_archive:
                _, filename = os.path.split(path)
                if filename in metadata:
                    result = dict(metadata[filename])
                    # set the audio feature and the path to the extracted file
                    path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
                    # pass both the resolved path and the raw bytes so the Audio feature can
                    # decode the clip in streaming mode as well as from the extracted archive
                    result["audio"] = {"path": path, "bytes": file.read()}
                    yield path, result
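
# Expected remote layout, inferred from the URL templates above (illustrative only;
# "en" stands in for any configured locale and actual shard counts come from n_shards.json):
#
#   n_shards.json                  -> {"en": {"xs": 2}, ...}
#   transcript/en/xs.tsv           -> tab-separated rows with at least "audio" and "sentence" columns
#   audio/en/xs/en_xs_0.tar, ...   -> sharded tar archives of audio clips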