Dionyssos committed
Commit 3eec6d2 · 1 Parent(s): d9889a1

LM has virtual 2nd batch

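The "virtual 2nd batch" in the commit title refers to the n_draw multinomial candidates that ride along the batch axis during generation and are flattened into the time axis before decoding. A minimal sketch of that reshaping, assuming the [n_draw, K, T] token shapes quoted in the diff comments (variable names are illustrative):

import torch

# n_draw candidate sequences are carried along the batch axis ...
n_draw, K, T = 24, 4, 35
gen_tokens = torch.randint(0, 2048, (n_draw, K, T))

# ... and flattened into one long sequence before decoding,
# mirroring the reshape added to audiogen.py below.
long_seq = gen_tokens.transpose(0, 1).reshape(K, -1)[None, :, :]
print(long_seq.shape)  # torch.Size([1, 4, 840])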
audiocraft/audiogen.py CHANGED
@@ -11,9 +11,160 @@ and provide easy access to the generation API.
11
 
12
  import typing as tp
13
  import torch
14
-
15
- from audiocraft.genmodel import BaseGenModel
16
  from audiocraft.loaders import load_compression_model, load_lm_model
17
 
18
  class AudioGen(BaseGenModel):
19
 
 
11
 
12
  import typing as tp
13
  import torch
 
 
14
  from audiocraft.loaders import load_compression_model, load_lm_model
15
+ import typing as tp
16
+ import omegaconf
17
+ import torch
18
+ import numpy as np
19
+ from abc import ABC, abstractmethod
20
+ from .lm import LMModel
21
+ from .conditioners import ConditioningAttributes
22
+ from .utils.autocast import TorchAutocast
23
+
24
+
25
+
26
+ def _shift(x):
27
+ n = x.shape[2]
28
+ i = np.random.randint(.24 * n, max(1, .74 * n))  # high should be >= 1; TBD: handle very short segments
29
+ x = torch.roll(x, i, dims=2)
30
+ return x
31
+
32
+
33
+ class BaseGenModel(ABC):
34
+ """Base generative model with convenient generation API.
35
+
36
+ Args:
37
+ name (str): name of the model.
38
+ compression_model (CompressionModel): Encodec with Seanet Decoder
39
+ lm (LMModel): Language model over discrete representations
40
+ max_duration (float, optional): since top-250 token draw() is used, xN sequences can be generated
41
+ """
42
+ def __init__(self,
43
+ name,
44
+ compression_model,
45
+ lm,
46
+ max_duration=None):
47
+ self.name = name
48
+ self.compression_model = compression_model
49
+ self.lm = lm
50
+ self.cfg: tp.Optional[omegaconf.DictConfig] = None
51
+ # Just to be safe, let's put everything in eval mode.
52
+ self.compression_model.eval()
53
+ self.lm.eval()
54
+
55
+ if hasattr(lm, 'cfg'):
56
+ cfg = lm.cfg
57
+ assert isinstance(cfg, omegaconf.DictConfig)
58
+ self.cfg = cfg
59
+
60
+ if max_duration is None:
61
+ if self.cfg is not None:
62
+ max_duration = lm.cfg.dataset.segment_duration # type: ignore
63
+ else:
64
+ raise ValueError("You must provide max_duration when building directly your GenModel")
65
+ assert max_duration is not None
66
+
67
+ self.max_duration: float = max_duration
68
+ self.duration = self.max_duration
69
+ self.device = next(iter(lm.parameters())).device
70
+ self.generation_params={}
71
+
72
+ if self.device.type == 'cpu':
73
+ self.autocast = TorchAutocast(enabled=False)
74
+ else:
75
+ self.autocast = TorchAutocast(
76
+ enabled=True,
77
+ device_type=self.device.type,
78
+ dtype=torch.float16)
79
+
80
+ @property
81
+ def frame_rate(self) -> float:
82
+ """Roughly the number of AR steps per second."""
83
+ return self.compression_model.frame_rate
84
+
85
+ @property
86
+ def sample_rate(self) -> int:
87
+ """Sample rate of the generated audio."""
88
+ return self.compression_model.sample_rate
89
+
90
+ @property
91
+ def audio_channels(self) -> int:
92
+ """Audio channels of the generated audio."""
93
+ return self.compression_model.channels
94
+
95
+ @torch.no_grad()
96
+ def _prepare_tokens_and_attributes(
97
+ self,
98
+ descriptions,
99
+ prompt,
100
+ ):
101
+ attributes = [
102
+ ConditioningAttributes(text={'description': description}) for description in descriptions]
103
+ prompt_tokens = None
104
+ return attributes, prompt_tokens
105
+
106
+ def generate_unconditional(self,
107
+ num_samples,
108
+ progress=False,
109
+ return_tokens=False):
110
+ descriptions: tp.List[tp.Optional[str]] = [None] * num_samples
111
+ attributes, _ = self._prepare_tokens_and_attributes(descriptions, None)
112
+ tokens = self._generate_tokens(attributes)
113
+ if return_tokens:
114
+ return self.generate_audio(tokens), tokens
115
+ return self.generate_audio(tokens)
116
+
117
+ def generate(self,
118
+ descriptions,
119
+ progress=False,
120
+ return_tokens=False):
121
+ attributes, _ = self._prepare_tokens_and_attributes(descriptions, None)
122
+ tokens = self._generate_tokens(attributes)
123
+ if return_tokens:
124
+ return self.generate_audio(tokens), tokens
125
+ return self.generate_audio(tokens)
126
+
127
+ def _generate_tokens(self, attributes,
128
+ prompt_tokens=None,
129
+ progress=False):
130
+
131
+ total_gen_len = int(self.duration * self.frame_rate)
132
+ max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)
133
+ current_gen_offset: int = 0
134
+
135
+
136
+
137
+
138
+
139
+ if self.duration <= self.max_duration:
140
+ # generate by sampling from LM, simple case.
141
+
142
+ with self.autocast:
143
+ gen_tokens = self.lm.generate(conditions=attributes,
144
+ callback=None,
145
+ max_gen_len=total_gen_len,
146
+ **self.generation_params)
147
+ else:
148
+ print('<>Long gen ?<>')
149
+ # print(f'{gen_tokens.shape=}') # [5,4,35]
150
+ # flatten the batch into the time axis (the batch is virtual: just multinomial sampling of n_draw tokens)
151
+ gen_tokens = gen_tokens.transpose(0, 1).reshape(4, -1)[None, :, :]
152
+ for _ in range(3):
153
+ print(gen_tokens.shape)
154
+ gen_tokens = _shift(gen_tokens)
155
+ return gen_tokens
156
+
157
+ def generate_audio(self, gen_tokens: torch.Tensor) -> torch.Tensor:
158
+ """Generate Audio from tokens."""
159
+ assert gen_tokens.dim() == 3
160
+ with torch.no_grad():
161
+ gen_audio = self.compression_model.decode(gen_tokens, None)
162
+ return gen_audio
163
+
164
+
165
+
166
+
167
+
168
 
169
  class AudioGen(BaseGenModel):
170
 
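The _shift helper added above applies a random circular roll along the time axis, so repeated applications reorder the flattened virtual-batch segments before decoding. A small sketch of its effect (the 0.24*n .. 0.74*n window follows the code; the data is illustrative):

import numpy as np
import torch

def _shift_sketch(x):
    # x: [B, K, T]; roll the token sequence by a random offset along time.
    n = x.shape[2]
    i = np.random.randint(int(0.24 * n), max(1, int(0.74 * n)))
    return torch.roll(x, i, dims=2)

x = torch.arange(12).reshape(1, 1, 12)
print(_shift_sketch(x)[0, 0])  # same values, circularly shifted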
audiocraft/conditioners.py CHANGED
@@ -385,19 +385,9 @@ class ConditionFuser(StreamingModule):
385
 
386
  def forward(
387
  self,
388
- input: torch.Tensor,
389
- conditions: tp.Dict[str, ConditionType]
390
- ) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
391
- """Fuse the conditions to the provided model input.
392
-
393
- Args:
394
- input (torch.Tensor): Transformer input.
395
- conditions (dict[str, ConditionType]): Dict of conditions.
396
- Returns:
397
- tuple[torch.Tensor, torch.Tensor]: The first tensor is the transformer input
398
- after the conditions have been fused. The second output tensor is the tensor
399
- used for cross-attention or None if no cross attention inputs exist.
400
- """
401
  B, T, _ = input.shape
402
 
403
 
 
385
 
386
  def forward(
387
  self,
388
+ input,
389
+ conditions):
390
+
391
  B, T, _ = input.shape
392
 
393
 
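The rewritten ConditionFuser.forward above drops its docstring; per the removed text it still returns the fused transformer input plus an optional cross-attention tensor. A minimal sketch of that contract, assuming (tensor, mask) condition tuples and a fuse2cond layout like the one referenced in lm.py (toy_fuse and its shapes are illustrative, not the repo's API):

import torch

def toy_fuse(input, conditions, fuse2cond):
    # Prepend-style conditions are concatenated along time; cross-attention
    # conditions are returned separately (or None), mirroring the two outputs
    # described in the removed docstring.
    cross_attention_input = None
    for name in fuse2cond.get('prepend', []):
        cond, _mask = conditions[name]
        input = torch.cat([cond, input], dim=1)   # [B, T_cond + T, C]
    for name in fuse2cond.get('cross', []):
        cond, _mask = conditions[name]
        cross_attention_input = cond              # [B, T_cond, C]
    return input, cross_attention_input

x = torch.randn(2, 5, 8)
conds = {'description': (torch.randn(2, 7, 8), torch.ones(2, 7))}
out, cross = toy_fuse(x, conds, {'cross': ['description']})
print(out.shape, cross.shape)  # [2, 5, 8] [2, 7, 8]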
audiocraft/genmodel.py DELETED
@@ -1,144 +0,0 @@
1
- import typing as tp
2
- import omegaconf
3
- import torch
4
-
5
- from abc import ABC, abstractmethod
6
-
7
- from .lm import LMModel
8
- from .conditioners import ConditioningAttributes
9
- from .utils.autocast import TorchAutocast
10
-
11
-
12
- class BaseGenModel(ABC):
13
- """Base generative model with convenient generation API.
14
-
15
- Args:
16
- name (str): name of the model.
17
- compression_model (CompressionModel): Encodec with Seanet Decoder
18
- lm (LMModel): Language model over discrete representations
19
- max_duration (float, optional): As is using top250 token draw() we can gen xN sequences
20
- """
21
- def __init__(self, name: str, compression_model, lm: LMModel,
22
- max_duration: tp.Optional[float] = None):
23
- self.name = name
24
- self.compression_model = compression_model
25
- self.lm = lm
26
- self.cfg: tp.Optional[omegaconf.DictConfig] = None
27
- # Just to be safe, let's put everything in eval mode.
28
- self.compression_model.eval()
29
- self.lm.eval()
30
-
31
- if hasattr(lm, 'cfg'):
32
- cfg = lm.cfg
33
- assert isinstance(cfg, omegaconf.DictConfig)
34
- self.cfg = cfg
35
-
36
- if max_duration is None:
37
- if self.cfg is not None:
38
- max_duration = lm.cfg.dataset.segment_duration # type: ignore
39
- else:
40
- raise ValueError("You must provide max_duration when building directly your GenModel")
41
- assert max_duration is not None
42
-
43
- self.max_duration: float = max_duration
44
- self.duration = self.max_duration
45
- self.device = next(iter(lm.parameters())).device
46
- self.generation_params={}
47
-
48
- if self.device.type == 'cpu':
49
- self.autocast = TorchAutocast(enabled=False)
50
- else:
51
- self.autocast = TorchAutocast(
52
- enabled=True,
53
- device_type=self.device.type,
54
- dtype=torch.float16)
55
-
56
- @property
57
- def frame_rate(self) -> float:
58
- """Roughly the number of AR steps per seconds."""
59
- return self.compression_model.frame_rate
60
-
61
- @property
62
- def sample_rate(self) -> int:
63
- """Sample rate of the generated audio."""
64
- return self.compression_model.sample_rate
65
-
66
- @property
67
- def audio_channels(self) -> int:
68
- """Audio channels of the generated audio."""
69
- return self.compression_model.channels
70
-
71
-
72
-
73
- @abstractmethod
74
- def set_generation_params(self, *args, **kwargs):
75
- """Set the generation parameters."""
76
- raise NotImplementedError("No base implementation for setting generation params.")
77
-
78
- @staticmethod
79
- @abstractmethod
80
- def get_pretrained(name: str, device=None):
81
- raise NotImplementedError("No base implementation for getting pretrained model")
82
-
83
- @torch.no_grad()
84
- def _prepare_tokens_and_attributes(
85
- self,
86
- descriptions,
87
- prompt,
88
- ):
89
- attributes = [
90
- ConditioningAttributes(text={'description': description}) for description in descriptions]
91
- prompt_tokens = None
92
- return attributes, prompt_tokens
93
-
94
- def generate_unconditional(self,
95
- num_samples,
96
- progress=False,
97
- return_tokens=False):
98
- descriptions: tp.List[tp.Optional[str]] = [None] * num_samples
99
- attributes, _ = self._prepare_tokens_and_attributes(descriptions, None)
100
- tokens = self._generate_tokens(attributes)
101
- if return_tokens:
102
- return self.generate_audio(tokens), tokens
103
- return self.generate_audio(tokens)
104
-
105
- def generate(self, descriptions, progress = False, return_tokens= False):
106
- attributes, _ = self._prepare_tokens_and_attributes(descriptions, None)
107
- tokens = self._generate_tokens(attributes)
108
- if return_tokens:
109
- return self.generate_audio(tokens), tokens
110
- return self.generate_audio(tokens)
111
-
112
- def _generate_tokens(self, attributes,
113
- prompt_tokens=None,
114
- progress=False):
115
-
116
- total_gen_len = int(self.duration * self.frame_rate)
117
- max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)
118
- current_gen_offset: int = 0
119
-
120
-
121
-
122
-
123
-
124
- if self.duration <= self.max_duration:
125
- # generate by sampling from LM, simple case.
126
-
127
- with self.autocast:
128
- gen_tokens = self.lm.generate(conditions=attributes,
129
- callback=None,
130
- max_gen_len=total_gen_len,
131
- **self.generation_params)
132
- else:
133
- print('<>Long gen ?<>')
134
- # print(f'{gen_tokens.shape=}') # [5,4,35]
135
- # FLATTEN BATCH AS EXTRA SEQUENCE (BATCH IS VIRTUAL JUST MULTINOMIAL SAMPLING OF N_DRAW TOKENS)
136
- gen_tokens = gen_tokens.transpose(0, 1).reshape(4, -1)[None, :, :]
137
- return gen_tokens
138
-
139
- def generate_audio(self, gen_tokens: torch.Tensor) -> torch.Tensor:
140
- """Generate Audio from tokens."""
141
- assert gen_tokens.dim() == 3
142
- with torch.no_grad():
143
- gen_audio = self.compression_model.decode(gen_tokens, None)
144
- return gen_audio
audiocraft/lm.py CHANGED
@@ -148,7 +148,7 @@ class LMModel(StreamingModule):
148
  super().__init__()
149
  self.cfg_coef = cfg_coef
150
 
151
- self.n_draw = 20
152
  self.condition_provider = condition_provider
153
  self.fuser = fuser
154
  self.card = card # 2048 ?
@@ -213,24 +213,22 @@ class LMModel(StreamingModule):
213
  def num_codebooks(self) -> int:
214
  return self.n_q
215
 
216
- def forward(self,
217
  sequence,
218
  conditions,
219
  condition_tensors=None,
220
  stage = -1):
221
  B, K, S = sequence.shape
222
- assert K == self.num_codebooks, "Sequence shape must match the specified number of codebooks"
 
223
  input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)])
224
- if condition_tensors is None:
225
- assert not self._is_streaming, "Conditions tensors should be precomputed when streaming."
226
-
227
- # encode conditions and fuse, both have a streaming cache to not recompute when generating.
228
- condition_tensors = self.condition_provider(tokenized)
229
- else:
230
- assert not conditions, "Shouldn't pass both conditions and condition_tensors."
231
 
232
  input_, cross_attention_input = self.fuser(input_, condition_tensors) # DEFINE conditioners.py
233
-
 
 
 
234
  out = self.transformer(input_, cross_attention_src=cross_attention_input,
235
  src_mask=(self.attn_mask_per_stage[stage] if stage >= 0 else None))
236
  if self.out_norm:
@@ -240,7 +238,7 @@ class LMModel(StreamingModule):
240
  # remove the prefix from the model outputs
241
  if len(self.fuser.fuse2cond['prepend']) > 0:
242
  logits = logits[:, :, -S:]
243
- print('PRESFIX')
244
 
245
  return logits # [B, K, S, card]
246
 
@@ -260,49 +258,20 @@ class LMModel(StreamingModule):
260
  cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef
261
  model = self if self._fsdp is None else self._fsdp
262
  two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg
263
- if two_step_cfg and cfg_conditions != {}:
264
- print('\nNOT HERE\n')
265
- else:
266
- print('C')
267
- assert isinstance(cfg_conditions, dict)
268
- condition_tensors = cfg_conditions
269
- if condition_tensors:
270
- print('\nDcat\n') # enters here
271
-
272
- sequence = torch.cat([sequence, sequence], dim=0) # if i concatenate
273
- # concatenates in batch but we only want to run the 1st sequence - continuation
274
- # the other paths will build "blindly"
275
- all_logits = model(
276
- sequence,
277
- conditions=[], condition_tensors=condition_tensors)
278
- if condition_tensors:
279
- cond_logits, uncond_logits = all_logits.split(B, dim=0) #torch.Size([2, 4, 1, 2048])
280
- # logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
281
- # logits = 3 * cond_logits - 2.4 * uncond_logits
282
- logits = 2 * cond_logits - 1.4 * uncond_logits
283
- else:
284
- print('\nF!\n')
285
-
286
-
287
- logits = logits.permute(0, 1, 3, 2) # [1, 4, 2048, 1]
288
- # No crop this is just squeeze() of time
289
- logits = logits[..., -1] # [1 x 4 x 2048]
290
-
291
-
292
- # Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error.
293
 
294
- # print(f'\nR {temp=} {top_p=} {top_k=}\n') -------------> R temp=1.0 top_p=0.0 top_k=250
295
- # print(f'{temp=}') # 1.0
296
- probs = torch.softmax(logits / temp, dim=-1)
297
 
298
- next_token = utils.sample_top_k(probs, k=top_k, n_draw=self.n_draw)
299
 
 
 
 
300
 
301
- # the decoder will smooth the transitions
302
- # so if we have 2 tokens, we need the 2nd token for the replica later,
303
- # so leave it as a batch and reshape at the final time-inversion
 
 
304
 
305
- # To return multiple tokens here (batch_size = num_draws)
306
  return next_token
307
 
308
  # GENERATE class revert_codebook_patterns()
@@ -385,7 +354,7 @@ class LMModel(StreamingModule):
385
  # but continue the sequence only with a single next token
386
 
387
  for offset in range(1, gen_sequence_len): # start_offset_sequence=1
388
- print(f'{offset=}')
389
  # starts from 1 not 0 thus uses the 0:1 as curr sequence
390
  # although this is empty contains -1 ?
391
 
 
148
  super().__init__()
149
  self.cfg_coef = cfg_coef
150
 
151
+ self.n_draw = 24
152
  self.condition_provider = condition_provider
153
  self.fuser = fuser
154
  self.card = card # 2048 ?
 
213
  def num_codebooks(self) -> int:
214
  return self.n_q
215
 
216
+ def forward(self,
217
  sequence,
218
  conditions,
219
  condition_tensors=None,
220
  stage = -1):
221
  B, K, S = sequence.shape
222
+
223
+
224
  input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)])
225
+
 
 
 
 
 
 
226
 
227
  input_, cross_attention_input = self.fuser(input_, condition_tensors) # DEFINE conditioners.py
228
+
229
+ # print(f'{input_.shape=} {cross_attention_input.shape=} FUSER LLM FORw')
230
+ # input_.shape=torch.Size([1, 1, 1536]) cross_attention_input.shape=torch.Size([2, 7, 1536]) FUSER LLM FORw
231
+
232
  out = self.transformer(input_, cross_attention_src=cross_attention_input,
233
  src_mask=(self.attn_mask_per_stage[stage] if stage >= 0 else None))
234
  if self.out_norm:
 
238
  # remove the prefix from the model outputs
239
  if len(self.fuser.fuse2cond['prepend']) > 0:
240
  logits = logits[:, :, -S:]
241
+ print('==========================================PRESFIX')
242
 
243
  return logits # [B, K, S, card]
244
 
 
258
  cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef
259
  model = self if self._fsdp is None else self._fsdp
260
  two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg
261
+ condition_tensors = cfg_conditions
 
262
 
 
 
 
263
 
 
264
 
265
+ logits = model(
266
+ sequence, # cond_logits = wav condition
267
+ conditions=[], condition_tensors=condition_tensors) # uncond_logits already see the text
268
 
269
+ # print(f'{logits.shape=} L')
270
+ logits = logits[0, :, :, :].transpose(1,0) # sample expects [1, 4, 2048]
271
+ # logits = [2, 4, 1, 2048]
272
+ # print(f'{B=}, {logits.shape=} SAMPLER {top_k=}')
273
+ next_token = utils.sample_top_k(logits, k=top_k, n_draw=self.n_draw) # [1,4,2048] logits
274
 
 
275
  return next_token
276
 
277
  # GENERATE class revert_codebook_patterns()
 
354
  # but continue the sequence only with a single next token
355
 
356
  for offset in range(1, gen_sequence_len): # start_offset_sequence=1
357
+ # print(f'{_gen_sequence.shape=}') # [1,4,16]
358
  # starts from 1 not 0 thus uses the 0:1 as curr sequence
359
  # although this is empty contains -1 ?
360
 
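The lm.py hunks above replace the classifier-free-guidance double batch (sequence duplicated, then 2 * cond_logits - 1.4 * uncond_logits) with a single forward pass whose first batch item is handed to the sampler. A sketch of both variants, with shapes taken from the diff comments ([2, 4, 1, 2048] logits, B = 1); names are illustrative:

import torch

def cfg_combine(all_logits, B=1):
    # Removed path: conditional / unconditional halves recombined with the
    # coefficients shown in the deleted lines.
    cond_logits, uncond_logits = all_logits.split(B, dim=0)
    return 2 * cond_logits - 1.4 * uncond_logits

def single_pass(logits):
    # New path: keep the first batch item and swap the codebook / time axes,
    # giving the [1, 4, 2048] tensor that sample_top_k expects.
    return logits[0, :, :, :].transpose(1, 0)

all_logits = torch.randn(2, 4, 1, 2048)
print(cfg_combine(all_logits).shape)  # torch.Size([1, 4, 1, 2048])
print(single_pass(all_logits).shape)  # torch.Size([1, 4, 2048])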
audiocraft/transformer.py CHANGED
@@ -177,7 +177,7 @@ class StreamingMultiheadAttention(StreamingModule):
177
  self.past_context = past_context
178
  self.memory_efficient = memory_efficient
179
  self.attention_as_float32 = attention_as_float32
180
- self.rope = rope
181
  self.cross_attention = cross_attention
182
  self.safe_streaming = safe_streaming
183
  self.num_heads = num_heads
@@ -230,38 +230,6 @@ class StreamingMultiheadAttention(StreamingModule):
230
  state_dict[prefix + "mha." + key] = state_dict.pop(prefix + key)
231
  super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
232
 
233
- def _get_mask(self, current_steps: int, device: torch.device, dtype: torch.dtype):
234
- # Return a causal mask, accounting for potentially stored past keys/values
235
- # We actually return a bias for the attention score, as this has the same
236
- # convention both in the builtin MHA in Pytorch, and Xformers functions.
237
- time_dim = _get_attention_time_dimension(self.memory_efficient)
238
- if self.memory_efficient:
239
- from xformers.ops import LowerTriangularMask
240
- if current_steps == 1:
241
- # If we only have one step, then we do not need a mask.
242
- return None
243
- elif 'past_keys' in self._streaming_state:
244
- raise RuntimeError("Not supported at the moment")
245
- else:
246
- # Then we can safely use a lower triangular mask
247
- return LowerTriangularMask()
248
- if self._streaming_state:
249
- past_keys = self._streaming_state['past_keys']
250
- past_steps = past_keys.shape[time_dim]
251
- else:
252
- past_steps = 0
253
-
254
- queries_pos = torch.arange(
255
- past_steps, current_steps + past_steps, device=device).view(-1, 1)
256
- keys_pos = torch.arange(past_steps + current_steps, device=device).view(1, -1)
257
- delta = queries_pos - keys_pos
258
- valid = delta >= 0
259
- if self.past_context is not None:
260
- valid &= (delta <= self.past_context)
261
- return torch.where(
262
- valid,
263
- torch.zeros([], device=device, dtype=dtype),
264
- torch.full([], float('-inf'), device=device, dtype=dtype))
265
 
266
  def _complete_kv(self, k, v):
267
  time_dim = _get_attention_time_dimension(self.memory_efficient)
@@ -272,11 +240,14 @@ class StreamingMultiheadAttention(StreamingModule):
272
  return k, v
273
  # Complete the key/value pair using the streaming state.
274
  if self._streaming_state:
 
275
  pk = self._streaming_state['past_keys']
276
  nk = torch.cat([pk, k], dim=time_dim)
277
  if v is k:
 
278
  nv = nk
279
  else:
 
280
  pv = self._streaming_state['past_values']
281
  nv = torch.cat([pv, v], dim=time_dim)
282
  else:
@@ -286,35 +257,28 @@ class StreamingMultiheadAttention(StreamingModule):
286
  assert nk.shape[time_dim] == nv.shape[time_dim]
287
  offset = 0
288
  if self.past_context is not None:
 
289
  offset = max(0, nk.shape[time_dim] - self.past_context)
290
  if self._is_streaming:
291
  self._streaming_state['past_keys'] = nk[:, offset:]
292
  if v is not k:
 
293
  self._streaming_state['past_values'] = nv[:, offset:]
294
  if 'offset' in self._streaming_state:
 
295
  self._streaming_state['offset'] += offset
296
  else:
 
297
  self._streaming_state['offset'] = torch.tensor(0)
298
  return nk, nv
299
 
300
- def _apply_rope(self, query: torch.Tensor, key: torch.Tensor):
301
- time_dim = _get_attention_time_dimension(self.memory_efficient)
302
- # Apply rope embeddings to query and key tensors.
303
- assert self.rope is not None
304
- if 'past_keys' in self._streaming_state:
305
- past_keys_offset = self._streaming_state['past_keys'].shape[1]
306
- else:
307
- past_keys_offset = 0
308
- if 'offset' in self._streaming_state:
309
- past_context_offset = int(self._streaming_state['offset'].item())
310
- else:
311
- past_context_offset = 0
312
- streaming_offset = past_context_offset + past_keys_offset
313
- return self.rope.rotate_qk(query, key, start=streaming_offset, time_dim=time_dim)
314
 
315
  def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
316
  key_padding_mask=None, need_weights=False, attn_mask=None,
317
  average_attn_weights=True, is_causal=False):
 
 
318
  assert not is_causal, ("New param added in torch 2.0.1 not supported, "
319
  "use the causal args in the constructor.")
320
 
@@ -328,29 +292,22 @@ class StreamingMultiheadAttention(StreamingModule):
328
  assert self.causal or self.cross_attention, \
329
  "Streaming only available for causal or cross attention"
330
 
331
- custom_attn_mask = attn_mask is not None
332
 
333
- if self.causal:
334
- assert attn_mask is None
335
- # At the moment we specialize only for the self-attention case.
336
- assert query.shape[1] == key.shape[1], "Causal only for same length query / key / value"
337
- assert value.shape[1] == key.shape[1], "Causal only for same length query / key / value"
338
- attn_mask = self._get_mask(query.shape[1], query.device, query.dtype)
339
 
340
  if self.custom:
341
- # custom implementation
342
- assert need_weights is False
343
- assert key_padding_mask is None
344
  if self.cross_attention:
 
345
  # Different queries, keys, values, we have to split the weights manually
346
  # before applying the linear.
347
  dim = self.in_proj_weight.shape[0] // 3
348
- if self.in_proj_bias is None:
349
- bias_q, bias_k, bias_v = None, None, None
350
- else:
351
- bias_q = self.in_proj_bias[:dim]
352
- bias_k = self.in_proj_bias[dim: 2 * dim]
353
- bias_v = self.in_proj_bias[2 * dim:]
354
  q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q)
355
  # todo: when streaming, we could actually save k, v and check the shape actually match.
356
  k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k)
@@ -366,125 +323,31 @@ class StreamingMultiheadAttention(StreamingModule):
366
  assert value is key, "specialized implementation"
367
  projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias)
368
  if self.kv_repeat == 1:
 
369
  if time_dim == 2:
370
  bound_layout = "b h p t d"
371
  else:
372
  bound_layout = "b t p h d"
373
  packed = rearrange(projected, f"b t (p h d) -> {bound_layout}", p=3, h=self.num_heads)
374
  q, k, v = ops.unbind(packed, dim=2)
375
- else:
376
- embed_dim = self.embed_dim
377
- per_head_dim = (embed_dim // self.num_heads)
378
- kv_heads = self.num_heads // self.kv_repeat
379
- q = projected[:, :, :embed_dim]
380
- start = embed_dim
381
- end = start + per_head_dim * kv_heads
382
- k = projected[:, :, start: end]
383
- v = projected[:, :, end:]
384
- q = rearrange(q, f"b t (h d) -> {layout}", h=self.num_heads)
385
- k = rearrange(k, f"b t (h d) -> {layout}", h=kv_heads)
386
- v = rearrange(v, f"b t (h d) -> {layout}", h=kv_heads)
387
 
388
- if self.qk_layer_norm is True:
389
- assert self.kv_repeat == 1
390
- q, k = [rearrange(x, f"{layout} -> b t (h d)") for x in [q, k]]
391
- q = self.q_layer_norm(q)
392
- k = self.k_layer_norm(k)
393
- q, k = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k]]
394
- if self.rope:
395
- q, k = self._apply_rope(q, k)
396
  k, v = self._complete_kv(k, v)
397
- if self.kv_repeat > 1:
398
- k = expand_repeated_kv(k, self.kv_repeat, self.memory_efficient)
399
- v = expand_repeated_kv(v, self.kv_repeat, self.memory_efficient)
400
- if self.attention_as_float32:
401
- q, k, v = [x.float() for x in [q, k, v]]
402
- if self.memory_efficient:
403
- if custom_attn_mask:
404
- # When using a custom attn mask:
405
- # Move to query's device, repeat for each sample, remove align8 padding
406
- seq_len = query.shape[1]
407
- attn_mask = attn_mask.to(q.dtype)
408
- attn_mask = attn_mask.repeat((q.shape[0], 1, 1, 1))
409
- attn_mask = attn_mask[..., :seq_len, :seq_len]
410
-
411
- p = self.dropout if self.training else 0
412
- if _efficient_attention_backend == 'torch':
413
- x = torch.nn.functional.scaled_dot_product_attention(
414
- q, k, v, is_causal=attn_mask is not None, dropout_p=p)
415
- else:
416
- x = ops.memory_efficient_attention(q, k, v, attn_mask, p=p)
417
- else:
418
- # We include the dot product as float32, for consistency
419
- # with the other implementations that include that step
420
- # as part of the attention. Note that when using `autocast`,
421
- # the einsums would be done as bfloat16, but the softmax
422
- # would be done as bfloat16, so `attention_as_float32` will
423
- # extend a bit the range of operations done in float32,
424
- # although this should make no difference.
425
- q = q / q.shape[-1] ** 0.5
426
- key_layout = layout.replace('t', 'k')
427
- query_layout = layout
428
- if self._is_streaming and self.safe_streaming and q.device.type == 'cuda':
429
- with torch.autocast(device_type=q.device.type, dtype=torch.float32):
430
- pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k)
431
- else:
432
- pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k)
433
- if attn_mask is not None:
434
- pre_w = pre_w + attn_mask
435
- w = torch.softmax(pre_w, dim=-1)
436
- w = F.dropout(w, self.dropout, training=self.training).to(v)
437
- # Key and value have the same format.
438
- x = torch.einsum(f"b h t k, {key_layout} -> {layout}", w, v)
439
  x = x.to(dtype)
440
  x = rearrange(x, f"{layout} -> b t (h d)", h=self.num_heads)
441
  x = self.out_proj(x)
442
- else:
443
- key, value = self._complete_kv(key, value)
444
- if self.attention_as_float32:
445
- query, key, value = [x.float() for x in [query, key, value]]
446
- x, _ = self.mha(
447
- query, key, value, key_padding_mask,
448
- need_weights, attn_mask, average_attn_weights)
449
- x = x.to(dtype)
450
-
451
  return x, None
452
 
453
 
454
  class StreamingTransformerLayer(nn.TransformerEncoderLayer):
455
- """TransformerLayer with Streaming / Causal support.
456
- This also integrates cross_attention, when passing `cross_attention=True`,
457
- rather than having two separate classes like in PyTorch.
458
 
459
- Args:
460
- d_model (int): Dimension of the data.
461
- num_heads (int): Number of heads.
462
- dim_feedforward (int): Intermediate dimension of FF module.
463
- dropout (float): Dropout both for MHA and FF.
464
- bias_ff (bool): Use bias for FF.
465
- bias_attn (bool): Use bias for MHA.
466
- causal (bool): Causal mask applied automatically.
467
- past_context (int, optional): Receptive field for the causal mask, infinite if None.
468
- custom (bool): Use custom MHA implementation, for testing / benchmarking.
469
- memory_efficient (bool): Use xformers based memory efficient attention.
470
- attention_as_float32 (bool): Perform the attention as float32
471
- (especially important with memory_efficient as autocast won't do this automatically).
472
- qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product in attention.
473
- qk_layer_norm_cross (bool): Same for the cross attention.
474
- cross_attention (bool): If True, expect to get secondary input for cross-attention.
475
- Cross attention will use the default MHA, as it typically won't require
476
- special treatment.
477
- layer_scale (float, optional): If not None, LayerScale will be used with
478
- the given value as initial scale.
479
- rope (`RotaryEmbedding`, optional): Rope embedding to use.
480
- attention_dropout (float, optional): If not None, separate the value of the dimension dropout
481
- in FFN and of the attention dropout.
482
- kv_repeat (int): If > 1, will repeat keys and queries multiple times (need to divide num_heads).
483
- This will lead to faster decoding time on A100 or other GPUs with tensorcore.
484
- device (torch.device, optional): Device on which to initialize.
485
- dtype (torch.dtype, optional): dtype to use.
486
- **kwargs: See `nn.TransformerEncoderLayer`.
487
- """
488
  def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1,
489
  bias_ff: bool = True, bias_attn: bool = True, causal: bool = False,
490
  past_context: tp.Optional[int] = None, custom: bool = False,
@@ -632,6 +495,7 @@ class StreamingTransformer(StreamingModule):
632
  assert positional_embedding in ['sin', 'rope', 'sin_rope']
633
  self.rope: tp.Optional[RotaryEmbedding] = None
634
  if self.positional_embedding in ['rope', 'sin_rope']:
 
635
  assert _is_custom(custom, memory_efficient)
636
  self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period,
637
  xpos=xpos, scale=positional_scale, device=device)
@@ -659,43 +523,11 @@ class StreamingTransformer(StreamingModule):
659
  # backward hook inside of FSDP...
660
  layer._magma_checkpointed = True # type: ignore
661
 
662
- def _apply_layer(self, layer, *args, **kwargs):
663
- method = self.checkpointing
664
- print(f'{method=}')
665
- if method == 'none':
666
- print([i.shape for i in args])
667
- x = layer(*args, **kwargs) # [10, 1, 1536] probably does no t detect the bathc somwhere
668
- return x
669
- # elif method == 'torch':
670
- # print('TORCH')
671
- # return torch_checkpoint(layer, *args, use_reentrant=False, **kwargs)
672
- # elif method.startswith('xformers'):
673
- # print('XFORMERS')
674
- # from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy
675
- # if method == 'xformers_default':
676
- # # those operations will be saved, and not recomputed.
677
- # # According to Francisco we can get smarter policies but this is a good start.
678
- # allow_list = [
679
- # "xformers.efficient_attention_forward_cutlass.default",
680
- # "xformers_flash.flash_fwd.default",
681
- # "aten.addmm.default",
682
- # "aten.mm.default",
683
- # ]
684
- # elif method == 'xformers_mm':
685
- # # those operations will be saved, and not recomputed.
686
- # # According to Francisco we can get smarter policies but this is a good start.
687
- # allow_list = [
688
- # "aten.addmm.default",
689
- # "aten.mm.default",
690
- # ]
691
- # else:
692
- # raise ValueError(f"xformers checkpointing xformers policy {method} is not known.")
693
- # policy_fn = _get_default_policy(allow_list)
694
- # return checkpoint(layer, *args, policy_fn=policy_fn, **kwargs)
695
- # else:
696
- # raise ValueError(f"Checkpointing method {method} is unknown.")
697
 
698
  def forward(self, x: torch.Tensor, *args, **kwargs):
 
 
699
  B, T, C = x.shape
700
 
701
  if 'offsets' in self._streaming_state:
@@ -704,17 +536,20 @@ class StreamingTransformer(StreamingModule):
704
  offsets = torch.zeros(B, dtype=torch.long, device=x.device)
705
 
706
  if self.positional_embedding in ['sin', 'sin_rope']:
 
707
  positions = torch.arange(T, device=x.device).view(1, -1, 1)
708
  positions = positions + offsets.view(-1, 1, 1)
709
  pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype)
710
  x = x + self.positional_scale * pos_emb
711
 
712
  for layer in self.layers:
713
- x = self._apply_layer(layer, x, *args, **kwargs)
 
 
714
 
715
  if self._is_streaming:
716
  self._streaming_state['offsets'] = offsets + T
717
-
718
  return x
719
 
720
  def make_optim_group(self):
 
177
  self.past_context = past_context
178
  self.memory_efficient = memory_efficient
179
  self.attention_as_float32 = attention_as_float32
180
+
181
  self.cross_attention = cross_attention
182
  self.safe_streaming = safe_streaming
183
  self.num_heads = num_heads
 
230
  state_dict[prefix + "mha." + key] = state_dict.pop(prefix + key)
231
  super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
232
 
233
 
234
  def _complete_kv(self, k, v):
235
  time_dim = _get_attention_time_dimension(self.memory_efficient)
 
240
  return k, v
241
  # Complete the key/value pair using the streaming state.
242
  if self._streaming_state:
243
+ # print('{self._streaming_state.keys()=}') EMPTY - ALTHOUGH WE HAVE STREAMING STATE
244
  pk = self._streaming_state['past_keys']
245
  nk = torch.cat([pk, k], dim=time_dim)
246
  if v is k:
247
+
248
  nv = nk
249
  else:
250
+
251
  pv = self._streaming_state['past_values']
252
  nv = torch.cat([pv, v], dim=time_dim)
253
  else:
 
257
  assert nk.shape[time_dim] == nv.shape[time_dim]
258
  offset = 0
259
  if self.past_context is not None:
260
+
261
  offset = max(0, nk.shape[time_dim] - self.past_context)
262
  if self._is_streaming:
263
  self._streaming_state['past_keys'] = nk[:, offset:]
264
  if v is not k:
265
+
266
  self._streaming_state['past_values'] = nv[:, offset:]
267
  if 'offset' in self._streaming_state:
268
+
269
  self._streaming_state['offset'] += offset
270
  else:
271
+
272
  self._streaming_state['offset'] = torch.tensor(0)
273
  return nk, nv
274
 
275
+
276
 
277
  def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
278
  key_padding_mask=None, need_weights=False, attn_mask=None,
279
  average_attn_weights=True, is_causal=False):
280
+
281
+
282
  assert not is_causal, ("New param added in torch 2.0.1 not supported, "
283
  "use the causal args in the constructor.")
284
 
 
292
  assert self.causal or self.cross_attention, \
293
  "Streaming only available for causal or cross attention"
294
 
295
+
296
 
297
+
298
+
299
+
 
 
 
300
 
301
  if self.custom:
302
+
 
 
303
  if self.cross_attention:
304
+
305
  # Different queries, keys, values, we have to split the weights manually
306
  # before applying the linear.
307
  dim = self.in_proj_weight.shape[0] // 3
308
+
309
+ bias_q, bias_k, bias_v = None, None, None
310
+
 
 
 
311
  q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q)
312
  # todo: when streaming, we could actually save k, v and check the shape actually match.
313
  k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k)
 
323
  assert value is key, "specialized implementation"
324
  projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias)
325
  if self.kv_repeat == 1:
326
+
327
  if time_dim == 2:
328
  bound_layout = "b h p t d"
329
  else:
330
  bound_layout = "b t p h d"
331
  packed = rearrange(projected, f"b t (p h d) -> {bound_layout}", p=3, h=self.num_heads)
332
  q, k, v = ops.unbind(packed, dim=2)
 
333
 
334
+
335
  k, v = self._complete_kv(k, v)
336
+ #print(f'{k.shape=}, {v.shape=}, {q.shape=}\n\n\n\n')
337
+ # what is the 24 dimension? is this the number of heads?
338
+
339
+ x = torch.nn.functional.scaled_dot_product_attention(
340
+ q, k, v, is_causal=attn_mask is not None, dropout_p=0)
341
+
342
+
343
  x = x.to(dtype)
344
  x = rearrange(x, f"{layout} -> b t (h d)", h=self.num_heads)
345
  x = self.out_proj(x)
 
346
  return x, None
347
 
348
 
349
  class StreamingTransformerLayer(nn.TransformerEncoderLayer):
 
 
 
350
 
351
  def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1,
352
  bias_ff: bool = True, bias_attn: bool = True, causal: bool = False,
353
  past_context: tp.Optional[int] = None, custom: bool = False,
 
495
  assert positional_embedding in ['sin', 'rope', 'sin_rope']
496
  self.rope: tp.Optional[RotaryEmbedding] = None
497
  if self.positional_embedding in ['rope', 'sin_rope']:
498
+ print('ROPE\nL')
499
  assert _is_custom(custom, memory_efficient)
500
  self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period,
501
  xpos=xpos, scale=positional_scale, device=device)
 
523
  # backward hook inside of FSDP...
524
  layer._magma_checkpointed = True # type: ignore
525
 
526
+
527
 
528
  def forward(self, x: torch.Tensor, *args, **kwargs):
529
+ # Input x: [1, 1, 1536]
530
+ # Output x: [2, 1, 1536]; how is the batch expanded to 2?
531
  B, T, C = x.shape
532
 
533
  if 'offsets' in self._streaming_state:
 
536
  offsets = torch.zeros(B, dtype=torch.long, device=x.device)
537
 
538
  if self.positional_embedding in ['sin', 'sin_rope']:
539
+ # print(f'{self.positional_embedding=}\n') 'sin'
540
  positions = torch.arange(T, device=x.device).view(1, -1, 1)
541
  positions = positions + offsets.view(-1, 1, 1)
542
  pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype)
543
  x = x + self.positional_scale * pos_emb
544
 
545
  for layer in self.layers:
546
+ # print(f'{args=} {kwargs.keys()=}')
547
+ # # args=() kwargs={'cross_attention_src', 'src_mask'}
548
+ x = layer(x, **kwargs)
549
 
550
  if self._is_streaming:
551
  self._streaming_state['offsets'] = offsets + T
552
+ print('OUT STReamTransfor', x.shape)
553
  return x
554
 
555
  def make_optim_group(self):
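After these edits the custom attention path keeps only the streaming key/value cache (_complete_kv) plus a plain torch.nn.functional.scaled_dot_product_attention call. A minimal, self-contained sketch of that pattern, assuming a dict-style cache and illustrative head counts and shapes:

import torch
import torch.nn.functional as F

def streaming_attention_step(q, k, v, cache):
    # q, k, v: [B, heads, T_new, head_dim]; the cache holds past keys/values.
    if 'past_keys' in cache:
        k = torch.cat([cache['past_keys'], k], dim=2)
        v = torch.cat([cache['past_values'], v], dim=2)
    cache['past_keys'], cache['past_values'] = k, v
    # A causal mask only matters when more than one new step is fed at once.
    return F.scaled_dot_product_attention(q, k, v, is_causal=q.shape[2] > 1)

cache = {}
B, H, D = 1, 16, 64
for _ in range(3):  # three autoregressive steps
    q = torch.randn(B, H, 1, D)
    k = v = torch.randn(B, H, 1, D)
    out = streaming_attention_step(q, k, v, cache)
print(out.shape, cache['past_keys'].shape)  # [1, 16, 1, 64] and [1, 16, 3, 64]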
audiocraft/utils/utils.py CHANGED
@@ -94,6 +94,11 @@ def sample_top_k(p, k, n_draw=None):
94
  p: probabilities over the 2048-token vocabulary?
95
  n_draw: how many tokens to sample (for duplicate elongation)
96
  """
 
 
 
 
 
97
  top_k_value, i250 = torch.topk(p, k, dim=-1) # probs: [1, 4, 2048]
98
  min_value_top_k = top_k_value[..., [-1]] #
99
  p *= (p >= min_value_top_k).float()
 
94
  p: probabilities over the 2048-token vocabulary?
95
  n_draw: how many tokens to sample (for duplicate elongation)
96
  """
97
+
98
+ p = torch.softmax(p / 1.0, dim=-1)
99
+
100
+
101
+
102
  top_k_value, i250 = torch.topk(p, k, dim=-1) # probs: [1, 4, 2048]
103
  min_value_top_k = top_k_value[..., [-1]] #
104
  p *= (p >= min_value_top_k).float()
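For reference, a minimal sketch of what the edited sample_top_k now does: softmax applied to the raw logits inside the function, a top-k restriction, then n_draw multinomial draws per codebook. Shapes follow the comments in the hunk; everything else is illustrative:

import torch

def sample_top_k_sketch(logits, k=250, n_draw=24):
    # logits: [1, K, card]; the softmax now happens inside the sampler.
    p = torch.softmax(logits / 1.0, dim=-1)
    top_k_value, _ = torch.topk(p, k, dim=-1)
    min_value_top_k = top_k_value[..., [-1]]
    p = p * (p >= min_value_top_k).float()       # zero out everything below the k-th prob
    K, card = p.shape[1], p.shape[2]
    tokens = torch.multinomial(p.view(K, card), num_samples=n_draw, replacement=True)
    return tokens.t()[:, :, None]                # [n_draw, K, 1] virtual batch

out = sample_top_k_sketch(torch.randn(1, 4, 2048))
print(out.shape)  # torch.Size([24, 4, 1])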