# NURC-SP_ENTOA_TTS / NURC-SP_ENTOA_TTS.py
# Uploaded by RodrigoLimaRFL — "Update NURC-SP_ENTOA_TTS.py" (commit 778b351, verified; 7.42 kB)
# NOTE(review): this header is Hugging Face web-page residue ("raw / history
# blame" links, file size) captured by the scrape, not Python source; it is
# preserved as comments so the module parses.
import csv
import datasets
from datasets import BuilderConfig, GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split
import logging
from pathlib import Path
import os
# Set up logging
# NOTE(review): calling basicConfig at import time configures the root logger
# as a module side effect — confirm this is intended for a loading script.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Per-split relative paths (inside the dataset repo) to the prompt/transcript
# CSVs for the "original" config.
_PROMPTS_URLS = {
"dev": "automatic/validation.csv",
"train": "automatic/train.csv",
}
# CSVs for the "filtered" config.
# NOTE(review): currently identical to _PROMPTS_URLS — confirm whether the
# filtered config should point at different files.
_PROMPTS_FILTERED_URLS = {
"dev": "automatic/validation.csv",
"train": "automatic/train.csv",
}
# Tarball containing the audio clips; both splits share the same archive.
_ARCHIVES = {
"dev": "automatic.tar.gz",
"train": "automatic.tar.gz",
}
# Directory prefix inside the archive that holds each split's clips.
_PATH_TO_CLIPS = {
"dev": "validation",
"train": "train",
}
class NurcSPConfig(BuilderConfig):
    """BuilderConfig selecting which prompt CSV set the builder downloads."""

    def __init__(self, prompts_type="original", **kwargs):
        """
        Args:
            prompts_type: "original" or "filtered"; read by
                _split_generators to choose the prompt CSV URL map.
            **kwargs: Forwarded unchanged to datasets.BuilderConfig
                (name, description, version, ...).
        """
        super().__init__(**kwargs)
        self.prompts_type = prompts_type
class NurcSPDataset(GeneratorBasedBuilder):
    """Dataset builder for the NURC-SP ENTOA TTS corpus.

    Each example pairs one row of the per-split prompts CSV (transcript plus
    speaker metadata) with the matching audio clip streamed out of a tar
    archive. Two configs are exposed, "original" and "filtered"; the config
    only selects which set of prompt CSVs is downloaded.
    """

    BUILDER_CONFIGS = [
        NurcSPConfig(name="original", description="Original audio prompts", prompts_type="original"),
        NurcSPConfig(name="filtered", description="Filtered audio prompts", prompts_type="filtered"),
    ]

    # CSV columns carried through verbatim (as strings) into every example.
    # Shared by _info and _load_prompts so schema and reader cannot drift.
    _METADATA_COLUMNS = [
        "audio_name",
        "file_path",
        "text",
        "start_time",
        "end_time",
        "duration",
        "quality",
        "speech_genre",
        "speech_style",
        "variety",
        "accent",
        "sex",
        "age_range",
        "num_speakers",
        "speaker_id",
    ]

    def _info(self):
        """Declare the example schema: all-string metadata plus 16 kHz audio."""
        features = {name: datasets.Value("string") for name in self._METADATA_COLUMNS}
        features["audio"] = datasets.Audio(sampling_rate=16_000)
        return DatasetInfo(features=datasets.Features(features))

    def _split_generators(self, dl_manager):
        """Download the prompt CSVs and audio archive; wire up both splits.

        Args:
            dl_manager: datasets download manager (supplies download and
                iter_archive).

        Returns:
            A list with the VALIDATION and TRAIN SplitGenerators.
        """
        logger.info("Using prompts_type: %s", self.config.prompts_type)
        prompts_urls = (
            _PROMPTS_FILTERED_URLS
            if self.config.prompts_type == "filtered"
            else _PROMPTS_URLS
        )
        logger.info("Downloading prompts from: %s", prompts_urls)
        prompts_path = dl_manager.download(prompts_urls)
        logger.info("Downloaded prompts to: %s", prompts_path)
        logger.info("Downloading archives from: %s", _ARCHIVES)
        archive = dl_manager.download(_ARCHIVES)
        logger.info("Downloaded archives to: %s", archive)
        return [
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "prompts_path": prompts_path["dev"],
                    "path_to_clips": _PATH_TO_CLIPS["dev"],
                    "audio_files": dl_manager.iter_archive(archive["dev"]),
                },
            ),
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "prompts_path": prompts_path["train"],
                    "path_to_clips": _PATH_TO_CLIPS["train"],
                    "audio_files": dl_manager.iter_archive(archive["train"]),
                },
            ),
        ]

    @classmethod
    def _load_prompts(cls, prompts_path):
        """Read the prompts CSV into a {posix file path: metadata row} map.

        Returns:
            (examples, csv_paths): ``examples`` maps each normalized file
            path to its metadata dict; ``csv_paths`` lists those paths in
            CSV row order (duplicates included, matching the row count).
        """
        examples = {}
        csv_paths = []
        # encoding="utf-8" fixes mojibake on platforms whose locale default
        # is not UTF-8 (the transcripts are accented Portuguese); newline=""
        # is the documented requirement for files handed to the csv module.
        with open(prompts_path, "r", encoding="utf-8", newline="") as f:
            for row in csv.DictReader(f):
                # Normalize to POSIX separators so CSV paths compare equal
                # to tar member names, which always use "/".
                file_path = Path(row["file_path"]).as_posix()
                record = {column: row[column] for column in cls._METADATA_COLUMNS}
                record["file_path"] = file_path
                examples[file_path] = record
                csv_paths.append(file_path)
        return examples, csv_paths

    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
        """Yield (key, example) pairs for one split.

        Args:
            prompts_path: Local path of the downloaded prompts CSV.
            path_to_clips: Directory prefix of this split's clips inside the
                audio archive (e.g. "train" or "validation").
            audio_files: Iterable of (member_path, file_obj) pairs, as
                produced by dl_manager.iter_archive.
        """
        logger.info("\n=== Path Analysis ===")
        logger.info("CSV Path: %s", prompts_path)
        logger.info("Expected clips directory: %s", path_to_clips)

        logger.info("\n=== Reading CSV ===")
        examples, csv_paths = self._load_prompts(prompts_path)
        logger.info("Found %d entries in CSV", len(csv_paths))

        logger.info("\n=== Sample CSV Paths ===")
        for path in csv_paths[:3]:
            logger.info("CSV path: %s", path)

        inside_clips_dir = False
        id_ = 0
        matched_files = 0
        archive_paths = []
        logger.info("\n=== Processing Archive ===")
        for path, f in audio_files:
            path = Path(path).as_posix()
            archive_paths.append(path)
            if path.startswith(path_to_clips):
                inside_clips_dir = True
                if path in examples:
                    audio = {"path": path, "bytes": f.read()}
                    matched_files += 1
                    yield id_, {**examples[path], "audio": audio}
                    id_ += 1
                else:
                    logger.debug("Unmatched archive path: %s", path)
            elif inside_clips_dir:
                # Tar members arrive grouped by directory, so once we have
                # left the clips directory there is nothing more to match.
                break

        self._log_match_diagnostics(csv_paths, archive_paths, matched_files)

    @staticmethod
    def _log_match_diagnostics(csv_paths, archive_paths, matched_files):
        """Log CSV-vs-archive path comparisons and a matching summary."""
        logger.info("\n=== Path Comparison ===")
        logger.info("First few paths from archive:")
        for path in archive_paths[:3]:
            logger.info("Archive path: %s", path)
            # Surface one CSV path sharing any path component so differences
            # in directory levels or casing are visible side by side.
            for csv_path in csv_paths:
                if any(part in csv_path for part in path.split('/')):
                    logger.info("Similar CSV path: %s", csv_path)
                    logger.info("Difference analysis:")
                    logger.info("  Archive path parts: %s", path.split('/'))
                    logger.info("  CSV path parts: %s", csv_path.split('/'))
                    break

        logger.info("\n=== Summary ===")
        logger.info("Total paths in CSV: %d", len(csv_paths))
        logger.info("Total paths in archive: %d", len(archive_paths))
        logger.info("Successfully matched files: %d", matched_files)

        if matched_files == 0:
            logger.warning("\n=== MATCHING FAILED ===")
            logger.warning("No files were matched between CSV and archive.")
            logger.warning("Common issues:")
            logger.warning("1. CSV paths might need to include/exclude the base directory")
            logger.warning("2. Path separators might be different (/ vs \\)")
            logger.warning("3. Case sensitivity issues in paths")
            logger.warning("4. Extra or missing directory levels")