jam-alt / create_hf_dataset.py
cifkao's picture
Replace parquet files with jsonl files
c193ec6
raw
history blame
2.24 kB
from pathlib import Path
import shutil
import datasets
def main():
    """Build per-language Hugging Face subsets and relink the MP3 files.

    Divides the MP3 files by language: Hugging Face requires each subset and
    its metadata to live in a separate directory.  For backwards compatibility
    the top-level "audio" directory is kept, with its MP3 files replaced by
    symlinks pointing into the subset directories.
    """
    datasets.disable_caching()

    # Start from a clean "subsets" tree.
    subsets_root = Path("subsets")
    if subsets_root.exists():
        shutil.rmtree(subsets_root)
    subsets_root.mkdir()

    # Keep the original MP3 files safe in "audio_orig"; "audio" is rebuilt
    # below to contain symlinks only.
    if not Path("audio_orig").exists():
        Path("audio").rename("audio_orig")
    elif Path("audio").exists():
        shutil.rmtree("audio")
    Path("audio").mkdir(exist_ok=True)

    # For each language subset:
    #   - hard link the files from audio_orig into subsets/<lang>/audio
    #   - add symlinks from the top-level audio/ into the subsets
    for subset in ["all", "en", "es", "de", "fr"]:
        target_dir = Path(".") if subset == "all" else subsets_root / subset
        target_dir.mkdir(exist_ok=True)

        dataset = datasets.load_dataset(
            "./loader.py",
            subset,
            trust_remote_code=True,
            split="test",
        )

        # "file_name" tells Hugging Face where each audio file lives,
        # relative to the metadata file.
        if subset == "all":
            file_names = [
                str(Path("subsets") / lang / "audio" / f"{n}.mp3")
                for lang, n in zip(dataset["language"], dataset["name"])
            ]
        else:
            file_names = [str(Path("audio") / f"{n}.mp3") for n in dataset["name"]]
        dataset = dataset.add_column("file_name", file_names)

        # The audio column is replaced by the file references written above.
        dataset = dataset.remove_columns("audio")
        dataset.to_json(target_dir / "metadata.jsonl")

        if subset != "all":
            (target_dir / "audio").mkdir()
            for name in dataset["name"]:
                # Hard link into the subset so no audio data is duplicated...
                (target_dir / "audio" / f"{name}.mp3").hardlink_to(
                    Path("audio_orig") / f"{name}.mp3"
                )
                # ...and symlink from the legacy top-level "audio" directory
                # (target is relative, resolved from inside "audio/").
                (Path("audio") / f"{name}.mp3").symlink_to(
                    Path("..") / target_dir / "audio" / f"{name}.mp3"
                )
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()