Update app.py
app.py CHANGED

@@ -123,7 +123,7 @@ def generate_music(
     genre_txt=None,
     lyrics_txt=None,
     run_n_segments=2,
-    max_new_tokens=
+    max_new_tokens=5,
     use_audio_prompt=False,
     audio_prompt_path="",
     prompt_start_time=0.0,
@@ -226,8 +226,6 @@ def generate_music(
         tensor_eoa = torch.as_tensor([[mmtokenizer.eoa]]).to(model.device)
         output_seq = torch.cat((output_seq, tensor_eoa), dim=1)
 
-        output_seq = model_inference(input_ids, max_new_tokens, top_p, temperature, repetition_penalty, guidance_scale)
-
         if i > 1:
             raw_output = torch.cat([raw_output, prompt_ids, output_seq[:, input_ids.shape[-1]:]], dim=1)
         else: