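"""Prepare LibriTTS training subsets for F5-TTS.

Walks the speaker/chapter directories of the selected subsets, pairs each .wav
with its .normalized.txt transcript, filters clips by duration, and writes to
the save directory:
  - raw.arrow     : one record per utterance (audio_path, text, duration)
  - duration.json : list of durations, for DynamicBatchSampler
  - vocab.txt     : sorted character vocabulary used as the char tokenizer
"""
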
import os
import sys
sys.path.append(os.getcwd())
import json
from concurrent.futures import ProcessPoolExecutor
from importlib.resources import files
from pathlib import Path
from tqdm import tqdm
import soundfile as sf
from datasets.arrow_writer import ArrowWriter
def deal_with_audio_dir(audio_dir):
    sub_result, durations = [], []
    vocab_set = set()
    audio_lists = list(audio_dir.rglob("*.wav"))

    for line in audio_lists:
        # LibriTTS pairs each .wav with a .normalized.txt transcript
        text_path = line.with_suffix(".normalized.txt")
        with open(text_path, "r") as f:
            text = f.read().strip()
        duration = sf.info(line).duration
        # skip clips shorter than 0.4 s or longer than 30 s
        if duration < 0.4 or duration > 30:
            continue
        sub_result.append({"audio_path": str(line), "text": text, "duration": duration})
        durations.append(duration)
        vocab_set.update(list(text))

    return sub_result, durations, vocab_set
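

# A quick standalone check of the helper above (a minimal sketch; the chapter
# directory path below is hypothetical and only illustrates LibriTTS's layout):
#     sub_result, durations, vocab_set = deal_with_audio_dir(Path("LibriTTS/train-clean-100/19/198"))
#     print(sub_result[0])   # {"audio_path": "...", "text": "...", "duration": ...}
#     print(len(vocab_set))  # distinct characters seen in this chapter

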
def main():
    result = []
    duration_list = []
    text_vocab_set = set()

    # process raw data in parallel, one task per speaker/chapter directory
    executor = ProcessPoolExecutor(max_workers=max_workers)
    futures = []
    for subset in tqdm(SUB_SET):
        dataset_path = Path(os.path.join(dataset_dir, subset))
        for audio_dir in dataset_path.iterdir():
            if audio_dir.is_dir():
                futures.append(executor.submit(deal_with_audio_dir, audio_dir))
    for future in tqdm(futures, total=len(futures)):
        sub_result, durations, vocab_set = future.result()
        result.extend(sub_result)
        duration_list.extend(durations)
        text_vocab_set.update(vocab_set)
    executor.shutdown()

    # save preprocessed dataset to disk
    os.makedirs(save_dir, exist_ok=True)
    print(f"\nSaving to {save_dir} ...")
    with ArrowWriter(path=f"{save_dir}/raw.arrow") as writer:
        for line in tqdm(result, desc="Writing to raw.arrow ..."):
            writer.write(line)

    # also dump the durations to a separate json, for easy use with DynamicBatchSampler
    with open(f"{save_dir}/duration.json", "w", encoding="utf-8") as f:
        json.dump({"duration": duration_list}, f, ensure_ascii=False)

    # vocab map, i.e. the character tokenizer: one symbol per line, sorted
    with open(f"{save_dir}/vocab.txt", "w") as f:
        for vocab in sorted(text_vocab_set):
            f.write(vocab + "\n")

    print(f"\nFor {dataset_name}, sample count: {len(result)}")
    print(f"For {dataset_name}, vocab size is: {len(text_vocab_set)}")
    print(f"For {dataset_name}, total {sum(duration_list) / 3600:.2f} hours")
if __name__ == "__main__":
    max_workers = 36
    tokenizer = "char"  # "pinyin" | "char"

    SUB_SET = ["train-clean-100", "train-clean-360", "train-other-500"]
    dataset_dir = "<SOME_PATH>/LibriTTS"  # path to the downloaded LibriTTS corpus
    dataset_name = f"LibriTTS_{'_'.join(SUB_SET)}_{tokenizer}".replace("train-clean-", "").replace("train-other-", "")
    save_dir = str(files("f5_tts").joinpath("../../")) + f"/data/{dataset_name}"
    print(f"\nPrepare for {dataset_name}, will save to {save_dir}\n")

    main()

    # For LibriTTS_100_360_500_char, sample count: 354218
    # For LibriTTS_100_360_500_char, vocab size is: 78
    # For LibriTTS_100_360_500_char, total 554.09 hours
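
    # To sanity-check the written dataset afterwards (a minimal sketch, assuming the
    # same `datasets` library that provides ArrowWriter above):
    #     from datasets import Dataset
    #     ds = Dataset.from_file(f"{save_dir}/raw.arrow")
    #     print(ds.num_rows)  # should match the sample count printed by main()
    #     print(ds[0])        # {"audio_path": ..., "text": ..., "duration": ...}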