Gregniuki committed on
Commit 86472e7 · verified · 1 Parent(s): 8c200a1

Delete model/cfm.py

Files changed (1)
  1. model/cfm.py +0 -285
model/cfm.py DELETED
@@ -1,285 +0,0 @@
- """
- ein notation:
- b - batch
- n - sequence
- nt - text sequence
- nw - raw wave length
- d - dimension
- """
-
- from __future__ import annotations
-
- from random import random
- from typing import Callable
-
- import torch
- import torch.nn.functional as F
- from torch import nn
- from torch.nn.utils.rnn import pad_sequence
- from torchdiffeq import odeint
-
- from f5_tts.model.modules import MelSpec
- from f5_tts.model.utils import (
-     default,
-     exists,
-     lens_to_mask,
-     list_str_to_idx,
-     list_str_to_tensor,
-     mask_from_frac_lengths,
- )
-
-
- class CFM(nn.Module):
-     def __init__(
-         self,
-         transformer: nn.Module,
-         sigma=0.0,
-         odeint_kwargs: dict = dict(
-             # atol = 1e-5,
-             # rtol = 1e-5,
-             method="euler"  # 'midpoint'
-         ),
-         audio_drop_prob=0.3,
-         cond_drop_prob=0.2,
-         num_channels=None,
-         mel_spec_module: nn.Module | None = None,
-         mel_spec_kwargs: dict = dict(),
-         frac_lengths_mask: tuple[float, float] = (0.7, 1.0),
-         vocab_char_map: dict[str, int] | None = None,
-     ):
-         super().__init__()
-
-         self.frac_lengths_mask = frac_lengths_mask
-
-         # mel spec
-         self.mel_spec = default(mel_spec_module, MelSpec(**mel_spec_kwargs))
-         num_channels = default(num_channels, self.mel_spec.n_mel_channels)
-         self.num_channels = num_channels
-
-         # classifier-free guidance
-         self.audio_drop_prob = audio_drop_prob
-         self.cond_drop_prob = cond_drop_prob
-
-         # transformer
-         self.transformer = transformer
-         dim = transformer.dim
-         self.dim = dim
-
-         # conditional flow related
-         self.sigma = sigma
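-         # note: with the default sigma=0.0 the conditional path is a straight line
-         # from noise to data; sigma is kept for the general CFM formulation but is
-         # not otherwise used in this file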
-
-         # sampling related
-         self.odeint_kwargs = odeint_kwargs
-
-         # vocab map for tokenization
-         self.vocab_char_map = vocab_char_map
-
-     @property
-     def device(self):
-         return next(self.parameters()).device
-
-     @torch.no_grad()
-     def sample(
-         self,
-         cond: float["b n d"] | float["b nw"],  # noqa: F722
-         text: int["b nt"] | list[str],  # noqa: F722
-         duration: int | int["b"],  # noqa: F821
-         *,
-         lens: int["b"] | None = None,  # noqa: F821
-         steps=32,
-         cfg_strength=1.0,
-         sway_sampling_coef=None,
-         seed: int | None = None,
-         max_duration=4096,
-         vocoder: Callable[[float["b d n"]], float["b nw"]] | None = None,  # noqa: F722
-         no_ref_audio=False,
-         duplicate_test=False,
-         t_inter=0.1,
-         edit_mask=None,
-     ):
-         self.eval()
-         # raw wave
-
-         if cond.ndim == 2:
-             cond = self.mel_spec(cond)
-             cond = cond.permute(0, 2, 1)
-             assert cond.shape[-1] == self.num_channels
-
-         cond = cond.to(next(self.parameters()).dtype)
-
-         batch, cond_seq_len, device = *cond.shape[:2], cond.device
-         if not exists(lens):
-             lens = torch.full((batch,), cond_seq_len, device=device, dtype=torch.long)
-
-         # text
-
-         if isinstance(text, list):
-             if exists(self.vocab_char_map):
-                 text = list_str_to_idx(text, self.vocab_char_map).to(device)
-             else:
-                 text = list_str_to_tensor(text).to(device)
-             assert text.shape[0] == batch
-
-         if exists(text):
-             text_lens = (text != -1).sum(dim=-1)
-             lens = torch.maximum(text_lens, lens)  # make sure lengths are at least those of the text characters
-
-         # duration
-
-         cond_mask = lens_to_mask(lens)
-         if edit_mask is not None:
-             cond_mask = cond_mask & edit_mask
-
-         if isinstance(duration, int):
-             duration = torch.full((batch,), duration, device=device, dtype=torch.long)
-
-         duration = torch.maximum(lens + 1, duration)  # just add one token so something is generated
-         duration = duration.clamp(max=max_duration)
-         max_duration = duration.amax()
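-         # duration is now per-sample; max_duration becomes the common padded
-         # sequence length used for all tensors below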
-
-         # duplicate test corner for inner time step observation
-         if duplicate_test:
-             test_cond = F.pad(cond, (0, 0, cond_seq_len, max_duration - 2 * cond_seq_len), value=0.0)
-
-         cond = F.pad(cond, (0, 0, 0, max_duration - cond_seq_len), value=0.0)
-         cond_mask = F.pad(cond_mask, (0, max_duration - cond_mask.shape[-1]), value=False)
-         cond_mask = cond_mask.unsqueeze(-1)
-         step_cond = torch.where(
-             cond_mask, cond, torch.zeros_like(cond)
-         )  # allow direct control (cut cond audio) with lens passed in
-
-         if batch > 1:
-             mask = lens_to_mask(duration)
-         else:  # save memory and speed up, as a single inference currently needs no mask
-             mask = None
-
-         # test for no ref audio
-         if no_ref_audio:
-             cond = torch.zeros_like(cond)
-
-         # neural ode
-
-         def fn(t, x):
-             # at each step, conditioning is fixed
-             # step_cond = torch.where(cond_mask, cond, torch.zeros_like(cond))
-
-             # predict flow
-             pred = self.transformer(
-                 x=x, cond=step_cond, text=text, time=t, mask=mask, drop_audio_cond=False, drop_text=False
-             )
-             if cfg_strength < 1e-5:
-                 return pred
-
-             null_pred = self.transformer(
-                 x=x, cond=step_cond, text=text, time=t, mask=mask, drop_audio_cond=True, drop_text=True
-             )
-             return pred + (pred - null_pred) * cfg_strength
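-
-         # fn applies classifier-free guidance by extrapolating away from the
-         # unconditional prediction: pred + (pred - null_pred) * cfg_strength
-         # equals (1 + cfg_strength) * pred - cfg_strength * null_pred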
-
-         # noise input
-         # per-sample seeding keeps batched inference results consistent across batch sizes,
-         # and matching single inference; some difference may remain, possibly due to convolutional layers
-         y0 = []
-         for dur in duration:
-             if exists(seed):
-                 torch.manual_seed(seed)
-             y0.append(torch.randn(dur, self.num_channels, device=self.device, dtype=step_cond.dtype))
-         y0 = pad_sequence(y0, padding_value=0, batch_first=True)
-
-         t_start = 0
-
-         # duplicate test corner for inner time step observation
-         if duplicate_test:
-             t_start = t_inter
-             y0 = (1 - t_start) * y0 + t_start * test_cond
-             steps = int(steps * (1 - t_start))
-
-         t = torch.linspace(t_start, 1, steps, device=self.device, dtype=step_cond.dtype)
-         if sway_sampling_coef is not None:
-             t = t + sway_sampling_coef * (torch.cos(torch.pi / 2 * t) - 1 + t)
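-             # sway sampling warps the uniform step schedule; a negative coefficient
-             # concentrates ODE steps near t = 0, early in the trajectory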
-
-         trajectory = odeint(fn, y0, t, **self.odeint_kwargs)
-
-         sampled = trajectory[-1]
-         out = sampled
-         out = torch.where(cond_mask, cond, out)
-
-         if exists(vocoder):
-             out = out.permute(0, 2, 1)
-             out = vocoder(out)
-
-         return out, trajectory
-
-     def forward(
-         self,
-         inp: float["b n d"] | float["b nw"],  # mel or raw wave  # noqa: F722
-         text: int["b nt"] | list[str],  # noqa: F722
-         *,
-         lens: int["b"] | None = None,  # noqa: F821
-         noise_scheduler: str | None = None,
-     ):
-         # handle raw wave
-         if inp.ndim == 2:
-             inp = self.mel_spec(inp)
-             inp = inp.permute(0, 2, 1)
-             assert inp.shape[-1] == self.num_channels
-
-         batch, seq_len, dtype, device, _σ1 = *inp.shape[:2], inp.dtype, self.device, self.sigma
-
-         # handle text as string
-         if isinstance(text, list):
-             if exists(self.vocab_char_map):
-                 text = list_str_to_idx(text, self.vocab_char_map).to(device)
-             else:
-                 text = list_str_to_tensor(text).to(device)
-             assert text.shape[0] == batch
-
-         # lens and mask
-         if not exists(lens):
-             lens = torch.full((batch,), seq_len, device=device)
-
-         mask = lens_to_mask(lens, length=seq_len)  # not strictly needed here, as collate_fn pads to the max length in the batch
-
-         # get a random span to mask out for training conditionally
-         frac_lengths = torch.zeros((batch,), device=self.device).float().uniform_(*self.frac_lengths_mask)
-         rand_span_mask = mask_from_frac_lengths(lens, frac_lengths)
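-         # rand_span_mask marks one contiguous span covering frac_lengths of each
-         # sample (70-100% by default); these frames are the infilling targets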
-
-         if exists(mask):
-             rand_span_mask &= mask
-
-         # mel is x1
-         x1 = inp
-
-         # x0 is Gaussian noise
-         x0 = torch.randn_like(x1)
-
-         # time step
-         time = torch.rand((batch,), dtype=dtype, device=self.device)
-         # TODO. noise_scheduler
-
-         # sample xt (φ_t(x) in the paper)
-         t = time.unsqueeze(-1).unsqueeze(-1)
-         φ = (1 - t) * x0 + t * x1
-         flow = x1 - x0
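-         # conditional flow matching on a straight-line path: φ_t = (1 - t) * x0 + t * x1,
-         # so the target velocity dφ_t/dt = x1 - x0 is constant in t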
-
-         # only predict what is within the random mask span for infilling
-         cond = torch.where(rand_span_mask[..., None], torch.zeros_like(x1), x1)
-
-         # transformer and cfg training with a drop rate
-         drop_audio_cond = random() < self.audio_drop_prob  # p_drop in voicebox paper
-         if random() < self.cond_drop_prob:  # p_uncond in voicebox paper
-             drop_audio_cond = True
-             drop_text = True
-         else:
-             drop_text = False
-
-         # to rigorously mask out padding, record it in collate_fn in dataset.py and pass it in here;
-         # adding the mask uses more memory, so the batch sampler threshold would also need scaling down for long sequences
-         pred = self.transformer(
-             x=φ, cond=cond, text=text, time=time, drop_audio_cond=drop_audio_cond, drop_text=drop_text
-         )
-
-         # flow matching loss
-         loss = F.mse_loss(pred, flow, reduction="none")
-         loss = loss[rand_span_mask]
-
-         return loss.mean(), cond, pred
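
For reference, a minimal sketch of how the deleted CFM class was typically constructed and sampled. The DiT backbone import and its hyperparameters are illustrative assumptions, not taken from this diff:

import torch

from f5_tts.model.backbones.dit import DiT  # assumed backbone; not part of this diff
from f5_tts.model.cfm import CFM  # the module removed by this commit

# hypothetical configuration, for illustration only
transformer = DiT(dim=1024, depth=22, heads=16, text_num_embeds=256, mel_dim=100)
model = CFM(transformer=transformer)

ref_wave = torch.randn(1, 24000)  # raw reference audio, shape (b, nw)
mel_out, trajectory = model.sample(
    cond=ref_wave,
    text=["hello world"],     # falls back to char-ordinal tokens when vocab_char_map is None
    duration=500,             # total output length in mel frames
    steps=32,
    cfg_strength=2.0,
    sway_sampling_coef=-1.0,  # shift ODE steps toward early t
)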