Commit 7d6db8f
Parent(s): dcaaa71

Add random deterministic chunking to dataloader

Files changed:
- README.md +3 -3
- config.yaml +4 -2
- remfx/datasets.py +47 -14
- scripts/train.py +1 -2
README.md CHANGED

```diff
@@ -6,13 +6,13 @@
 4. `pip install -e umx`
 
 ## Download [GuitarFX Dataset](https://zenodo.org/record/7044411/)
-`./download_egfx.sh`
+`./scripts/download_egfx.sh`
 
 ## Train model
 1. Change Wandb variables in `shell_vars.sh`
-2. `python train.py exp=audio_diffusion`
+2. `python scripts/train.py exp=audio_diffusion`
 or
-2. `python train.py exp=umx`
+2. `python scripts/train.py exp=umx`
 
 To add gpu, add `trainer.accelerator='gpu' trainer.devices=-1` to the command-line
```
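Combining the renamed training command with the GPU flags the README already lists, a full GPU run would look like, e.g., `python scripts/train.py exp=umx trainer.accelerator='gpu' trainer.devices=-1`.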
config.yaml CHANGED

```diff
@@ -20,12 +20,14 @@ callbacks:
     filename: '{epoch:02d}-{valid_loss:.3f}'
 
 datamodule:
-  _target_: datasets.Datamodule
+  _target_: remfx.datasets.Datamodule
   dataset:
-    _target_: datasets.GuitarFXDataset
+    _target_: remfx.datasets.GuitarFXDataset
     sample_rate: ${sample_rate}
    root: ${oc.env:DATASET_ROOT}
     length: ${length}
+    chunk_size_in_sec: 3
+    num_chunks: 10
  val_split: 0.2
   batch_size: 16
   num_workers: 8
```
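The two new keys feed straight into `GuitarFXDataset.__init__` (see the datasets.py diff below). As a rough sketch of what Hydra does with this block when `scripts/train.py` calls `hydra.utils.instantiate(cfg.datamodule, _convert_="partial")` — the concrete values below are placeholders for the `${...}` interpolations, and it is assumed (as the config nesting implies) that `Datamodule` accepts the remaining keys as constructor arguments:

```python
# Sketch only: the instantiation this config block describes.
from remfx.datasets import Datamodule, GuitarFXDataset

dataset = GuitarFXDataset(
    sample_rate=22050,      # placeholder for ${sample_rate}
    root="/data/egfx",      # placeholder for ${oc.env:DATASET_ROOT}
    length=2**18,           # placeholder for ${length}
    chunk_size_in_sec=3,
    num_chunks=10,
)
datamodule = Datamodule(
    dataset=dataset,
    val_split=0.2,
    batch_size=16,
    num_workers=8,
)
```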
remfx/datasets.py CHANGED

```diff
@@ -1,15 +1,17 @@
+import torch
 from torch.utils.data import Dataset, DataLoader, random_split
 import torchaudio
 import torchaudio.transforms as T
 import torch.nn.functional as F
 from pathlib import Path
 import pytorch_lightning as pl
-from typing import Any, List
+from typing import Any, List, Tuple
 
 # https://zenodo.org/record/7044411/
 
 LENGTH = 2**18  # 12 seconds
 ORIG_SR = 48000
+torch.manual_seed(123)
 
 
 class GuitarFXDataset(Dataset):
@@ -18,13 +20,18 @@ class GuitarFXDataset(Dataset):
         root: str,
         sample_rate: int,
         length: int = LENGTH,
+        chunk_size_in_sec: int = 3,
+        num_chunks: int = 10,
         effect_types: List[str] = None,
     ):
         self.length = length
         self.wet_files = []
         self.dry_files = []
+        self.chunks = []
         self.labels = []
         self.root = Path(root)
+        self.chunk_size_in_sec = chunk_size_in_sec
+        self.num_chunks = num_chunks
 
         if effect_types is None:
             effect_types = [
@@ -32,38 +39,64 @@ class GuitarFXDataset(Dataset):
         ]
         for i, effect in enumerate(effect_types):
             for pickup in Path(self.root / effect).iterdir():
-                …
-                …
+                wet_files = sorted(list(pickup.glob("*.wav")))
+                dry_files = sorted(
                     list(self.root.glob(f"Clean/{pickup.name}/**/*.wav"))
                 )
-                self.…
+                self.wet_files += wet_files
+                self.dry_files += dry_files
+                self.labels += [i] * len(wet_files)
+                for audio_file in wet_files:
+                    chunks = create_random_chunks(
+                        audio_file, self.chunk_size_in_sec, self.num_chunks
+                    )
+                    self.chunks += chunks
         print(
-            f"Found {len(self.wet_files)} wet files and {len(self.dry_files)} dry files"
+            f"Found {len(self.wet_files)} wet files and {len(self.dry_files)} dry files.\n"
+            f"Total chunks: {len(self.chunks)}"
         )
         self.resampler = T.Resample(ORIG_SR, sample_rate)
 
     def __len__(self):
-        return len(self.…
+        return len(self.chunks)
 
     def __getitem__(self, idx):
-        …
-        …
-        …
+        # Load effected and "clean" audio
+        song_idx = idx // self.num_chunks
+        x, sr = torchaudio.load(self.wet_files[song_idx])
+        y, sr = torchaudio.load(self.dry_files[song_idx])
+        effect_label = self.labels[song_idx]  # Effect label
+
+        chunk_indices = self.chunks[idx]
+        x = x[:, chunk_indices[0] : chunk_indices[1]]
+        y = y[:, chunk_indices[0] : chunk_indices[1]]
 
         resampled_x = self.resampler(x)
         resampled_y = self.resampler(y)
-        # Pad
+        # Pad to length if needed
         if resampled_x.shape[-1] < self.length:
             resampled_x = F.pad(resampled_x, (0, self.length - resampled_x.shape[1]))
-        elif resampled_x.shape[-1] > self.length:
-            resampled_x = resampled_x[:, : self.length]
         if resampled_y.shape[-1] < self.length:
             resampled_y = F.pad(resampled_y, (0, self.length - resampled_y.shape[1]))
-        elif resampled_y.shape[-1] > self.length:
-            resampled_y = resampled_y[:, : self.length]
         return (resampled_x, resampled_y, effect_label)
 
 
+def create_random_chunks(
+    audio_file: str, chunk_size: int, num_chunks: int
+) -> List[Tuple[int, int]]:
+    """Create random chunks of size chunk_size (seconds) from an audio file.
+    Return sample_indices.
+    """
+    audio, sr = torchaudio.load(audio_file)
+    chunk_size_in_samples = chunk_size * sr
+    chunks = []
+    for i in range(num_chunks):
+        start = torch.randint(0, audio.shape[-1] - chunk_size_in_samples, (1,)).item()
+        end = start + chunk_size_in_samples
+        chunks.append((start, end))
+    return chunks
+
+
 class Datamodule(pl.LightningDataModule):
     def __init__(
         self,
```
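The "deterministic" part of the commit title comes from the module-level `torch.manual_seed(123)`: every import of `remfx.datasets` resets the global RNG, so the `torch.randint` draws in `create_random_chunks` yield the same chunk boundaries on every run. A self-contained illustration of that property (hypothetical numbers, not repo code):

```python
import torch

def chunk_starts(n_samples: int, chunk_len: int, num_chunks: int) -> list:
    # The same draw create_random_chunks makes, minus the audio loading.
    return [
        torch.randint(0, n_samples - chunk_len, (1,)).item()
        for _ in range(num_chunks)
    ]

# 12 s of 48 kHz audio, 3 s chunks, 10 chunks per file.
torch.manual_seed(123)
first = chunk_starts(48000 * 12, 48000 * 3, 10)

torch.manual_seed(123)
second = chunk_starts(48000 * 12, 48000 * 3, 10)

assert first == second  # identical "random" chunks on every run
```

Note that `__getitem__` recovers the source file with `song_idx = idx // self.num_chunks`, which relies on the constructor appending exactly `num_chunks` chunks per wet file, in file order; chunks are cut at the original 48 kHz rate before being resampled and padded to `length`.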
scripts/train.py CHANGED

```diff
@@ -6,7 +6,7 @@ import remfx.utils as utils
 log = utils.get_logger(__name__)
 
 
-@hydra.main(version_base=None, config_path="…
+@hydra.main(version_base=None, config_path="../", config_name="config.yaml")
 def main(cfg: DictConfig):
     # Apply seed for reproducibility
     print(cfg)
@@ -14,7 +14,6 @@ def main(cfg: DictConfig):
 
     log.info(f"Instantiating datamodule <{cfg.datamodule._target_}>.")
     datamodule = hydra.utils.instantiate(cfg.datamodule, _convert_="partial")
-
     log.info(f"Instantiating model <{cfg.model._target_}>.")
     model = hydra.utils.instantiate(cfg.model, _convert_="partial")
 
```
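For reference, Hydra resolves `config_path` relative to the directory of the decorated file, so `"../"` points `scripts/train.py` one level up, at the repo root where `config.yaml` lives. A minimal standalone sketch of the same pattern (hypothetical script, not from this repo):

```python
import hydra
from omegaconf import DictConfig

# "../" is relative to this file's directory; config_name selects
# config.yaml from that directory.
@hydra.main(version_base=None, config_path="../", config_name="config.yaml")
def main(cfg: DictConfig) -> None:
    # With this commit's config, the new dataset key resolves to 3.
    print(cfg.datamodule.dataset.chunk_size_in_sec)

if __name__ == "__main__":
    main()
```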