import csv
import logging
from pathlib import Path

import datasets
from datasets import BuilderConfig, DatasetInfo, GeneratorBasedBuilder, Split, SplitGenerator

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

_PROMPTS_URLS = {
    "dev": "automatic/validation.csv",
    "train": "automatic/train.csv",
}

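# NOTE: currently identical to _PROMPTS_URLS; the "filtered" config resolves
# to the same CSV files as the original one.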
_PROMPTS_FILTERED_URLS = {
    "dev": "automatic/validation.csv",
    "train": "automatic/train.csv",
}

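# Both splits are packed in the same tarball.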
_ARCHIVES = {
    "dev": "automatic.tar.gz",
    "train": "automatic.tar.gz",
}

_PATH_TO_CLIPS = {
    "dev": "validation",
    "train": "train",
}

class NurcSPConfig(BuilderConfig):
    def __init__(self, prompts_type="original", **kwargs):
        super().__init__(**kwargs)
        self.prompts_type = prompts_type

class NurcSPDataset(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        NurcSPConfig(name="original", description="Original audio prompts", prompts_type="original"),
        NurcSPConfig(name="filtered", description="Filtered audio prompts", prompts_type="filtered"),
    ]
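    # The config name ("original" or "filtered") is passed as the second
    # positional argument to load_dataset; see the usage sketch at the bottom
    # of this file.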

    def _info(self):
        return DatasetInfo(
            features=datasets.Features(
                {
                    "audio_name": datasets.Value("string"),
                    "file_path": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "start_time": datasets.Value("string"),
                    "end_time": datasets.Value("string"),
                    "duration": datasets.Value("string"),
                    "quality": datasets.Value("string"),
                    "speech_genre": datasets.Value("string"),
                    "speech_style": datasets.Value("string"),
                    "variety": datasets.Value("string"),
                    "accent": datasets.Value("string"),
                    "sex": datasets.Value("string"),
                    "age_range": datasets.Value("string"),
                    "num_speakers": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                }
            )
        )

    def _split_generators(self, dl_manager):
        logger.info(f"Using prompts_type: {self.config.prompts_type}")
        
        prompts_urls = _PROMPTS_URLS
        if self.config.prompts_type == "filtered":
            prompts_urls = _PROMPTS_FILTERED_URLS
        
        logger.info(f"Downloading prompts from: {prompts_urls}")
        prompts_path = dl_manager.download(prompts_urls)
        logger.info(f"Downloaded prompts to: {prompts_path}")
        
        logger.info(f"Downloading archives from: {_ARCHIVES}")
        archive = dl_manager.download(_ARCHIVES)
        logger.info(f"Downloaded archives to: {archive}")

        return [
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "prompts_path": prompts_path["dev"],
                    "path_to_clips": _PATH_TO_CLIPS["dev"],
                    "audio_files": dl_manager.iter_archive(archive["dev"]),
                }
            ),
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "prompts_path": prompts_path["train"],
                    "path_to_clips": _PATH_TO_CLIPS["train"],
                    "audio_files": dl_manager.iter_archive(archive["train"]),
                }
            ),
        ]

    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
        logger.info("\n=== Path Analysis ===")
        logger.info(f"CSV Path: {prompts_path}")
        logger.info(f"Expected clips directory: {path_to_clips}")
        
        examples = {}
        example_count = 0
        csv_paths = []
        
        # Read CSV file and store paths
        logger.info("\n=== Reading CSV ===")
        with open(prompts_path, "r", encoding="utf-8") as f:
            csv_reader = csv.DictReader(f)
            for row in csv_reader:
                file_path = Path(row['file_path']).as_posix()
                examples[file_path] = {
                    "audio_name": row['audio_name'],
                    "file_path": file_path,
                    "text": row['text'],
                    "start_time": row['start_time'],
                    "end_time": row['end_time'],
                    "duration": row['duration'],
                    "quality": row['quality'],
                    "speech_genre": row['speech_genre'],
                    "speech_style": row['speech_style'],
                    "variety": row['variety'],
                    "accent": row['accent'],
                    "sex": row['sex'],
                    "age_range": row['age_range'],
                    "num_speakers": row['num_speakers'],
                    "speaker_id": row['speaker_id'],
                }
                csv_paths.append(file_path)
                example_count += 1
        
        logger.info(f"Found {example_count} entries in CSV")
        
        # Show first few CSV paths
        logger.info("\n=== Sample CSV Paths ===")
        for path in csv_paths[:3]:
            logger.info(f"CSV path: {path}")
        
        inside_clips_dir = False
        id_ = 0
        matched_files = 0
        archive_paths = []
        
        logger.info("\n=== Processing Archive ===")
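        # iter_archive yields members in archive order, so once we have entered
        # and then left the clips directory we can stop early. The startswith
        # check assumes member paths are rooted at the clips directory (e.g.
        # "validation/..."); if the tarball nests everything under an extra
        # top-level folder, nothing will match -- the diagnostics below exist
        # to surface exactly that kind of mismatch.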
        for path, f in audio_files:
            path = Path(path).as_posix()
            archive_paths.append(path)
            
            if path.startswith(path_to_clips):
                inside_clips_dir = True
                if path in examples:
                    audio = {"path": path, "bytes": f.read()}
                    matched_files += 1
                    yield id_, {**examples[path], "audio": audio}
                    id_ += 1
                else:
                    logger.debug(f"Unmatched archive path: {path}")
            elif inside_clips_dir:
                break
        
        # Show path comparison
        logger.info("\n=== Path Comparison ===")
        logger.info("First few paths from archive:")
        for path in archive_paths[:3]:
            logger.info(f"Archive path: {path}")
            
            # Try to find a similar path in CSV
            for csv_path in csv_paths:
                if any(part in csv_path for part in path.split('/')):
                    logger.info(f"Similar CSV path: {csv_path}")
                    logger.info("Difference analysis:")
                    logger.info(f"  Archive path parts: {path.split('/')}")
                    logger.info(f"  CSV path parts: {csv_path.split('/')}")
                    break
        
        logger.info("\n=== Summary ===")
        logger.info(f"Total paths in CSV: {len(csv_paths)}")
        logger.info(f"Total paths in archive: {len(archive_paths)}")
        logger.info(f"Successfully matched files: {matched_files}")
        
        if matched_files == 0:
            logger.warning("\n=== MATCHING FAILED ===")
            logger.warning("No files were matched between CSV and archive.")
            logger.warning("Common issues:")
            logger.warning("1. CSV paths might need to include/exclude the base directory")
            logger.warning("2. Path separators might be different (/ vs \\)")
            logger.warning("3. Case sensitivity issues in paths")
            logger.warning("4. Extra or missing directory levels")
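
# Usage sketch (hypothetical): the script path below is a placeholder for
# wherever this file actually lives (local path or Hub repo id); adjust it
# before running. Depending on the installed `datasets` version, loading a
# script from the Hub may also require trust_remote_code=True.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("path/to/this/script.py", "original", split="train")
    print(ds[0]["audio_name"], ds[0]["text"])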