quazim committed on
Commit
ff2a87e
·
1 Parent(s): 81059be
Files changed (1) hide show
  1. app.py +21 -23
app.py CHANGED
@@ -49,14 +49,14 @@ def cleanup_temp_files():
49
  import time
50
  temp_dir = tempfile.gettempdir()
51
  cutoff_time = time.time() - 3600
52
-
53
  # Clean old generated music files
54
  patterns = [
55
  os.path.join(temp_dir, "tmp*.wav"),
56
  os.path.join(temp_dir, "generated_music_*.wav"),
57
  os.path.join(temp_dir, "musicgen_variant_*.wav")
58
  ]
59
-
60
  for pattern in patterns:
61
  for temp_file in glob.glob(pattern):
62
  try:
@@ -312,7 +312,7 @@ def generate_music_batch(text_prompt, duration=10, guidance_scale=3.0, model_mod
312
 
313
  batch_size = MODEL_CONFIG['batch_size'] if MODEL_CONFIG['batch_mode'] else 1
314
  prompts = [text_prompt] * batch_size
315
-
316
  start_time = time.time()
317
  outputs = generator(
318
  prompts,
@@ -325,21 +325,21 @@ def generate_music_batch(text_prompt, duration=10, guidance_scale=3.0, model_mod
325
 
326
  audio_variants = []
327
  sample_rate = outputs[0]['sampling_rate']
328
-
329
  # Create unique timestamp for this generation batch
330
  batch_timestamp = int(time.time() * 1000)
331
-
332
  for i, output in enumerate(outputs):
333
  audio_data = output['audio']
334
-
335
  print(f"[GENERATION] Processing variant {i + 1} audio shape: {audio_data.shape}")
336
-
337
  if hasattr(audio_data, 'cpu'):
338
  audio_data = audio_data.cpu().numpy()
339
 
340
  if len(audio_data.shape) == 3:
341
  audio_data = audio_data[0]
342
-
343
  if len(audio_data.shape) == 2:
344
  if audio_data.shape[0] < audio_data.shape[1]:
345
  audio_data = audio_data.T
@@ -347,36 +347,34 @@ def generate_music_batch(text_prompt, duration=10, guidance_scale=3.0, model_mod
347
  audio_data = audio_data[:, 0]
348
  else:
349
  audio_data = audio_data.flatten()
350
-
351
  audio_data = audio_data.flatten()
352
-
353
  max_val = np.max(np.abs(audio_data))
354
  if max_val > 0:
355
  audio_data = audio_data / max_val * 0.95
356
-
357
  audio_data = (audio_data * 32767).astype(np.int16)
358
-
359
  # Save each variant to a unique temporary file
360
- temp_filename = f"musicgen_variant_{i+1}_{batch_timestamp}.wav"
361
  temp_path = os.path.join(tempfile.gettempdir(), temp_filename)
362
-
363
  sf.write(temp_path, audio_data, sample_rate)
364
-
365
  print(f"[GENERATION] Variant {i + 1} saved to: {temp_path}")
366
  print(f"[GENERATION] Variant {i + 1} file size: {os.path.getsize(temp_path)} bytes")
367
-
368
  audio_variants.append(temp_path)
369
-
370
  print(f"[GENERATION] Variant {i + 1} final shape: {audio_data.shape}")
371
 
372
  while len(audio_variants) < 6:
373
  audio_variants.append(None)
374
 
375
- savings_message = get_fixed_savings_message()
376
-
377
  variants_text = "audio"
378
- generation_info = f"✅ Generated {variants_text} in {generation_time:.2f}s\n{savings_message}"
379
-
380
  return audio_variants[0], audio_variants[1], audio_variants[2], audio_variants[3], audio_variants[4], \
381
  audio_variants[5], generation_info
382
 
@@ -434,12 +432,12 @@ with gr.Blocks(title="MusicGen Large - Music Generation") as demo:
434
  audio_output1 = gr.Audio(label="Variant 1", type="filepath", visible=actual_outputs >= 1)
435
  audio_output2 = gr.Audio(label="Variant 2", type="filepath", visible=actual_outputs >= 2)
436
  audio_outputs.extend([audio_output1, audio_output2])
437
-
438
  with gr.Row():
439
  audio_output3 = gr.Audio(label="Variant 3", type="filepath", visible=actual_outputs >= 3)
440
  audio_output4 = gr.Audio(label="Variant 4", type="filepath", visible=actual_outputs >= 4)
441
  audio_outputs.extend([audio_output3, audio_output4])
442
-
443
  with gr.Row():
444
  audio_output5 = gr.Audio(label="Variant 5", type="filepath", visible=actual_outputs >= 5)
445
  audio_output6 = gr.Audio(label="Variant 6", type="filepath", visible=actual_outputs >= 6)
 
49
  import time
50
  temp_dir = tempfile.gettempdir()
51
  cutoff_time = time.time() - 3600
52
+
53
  # Clean old generated music files
54
  patterns = [
55
  os.path.join(temp_dir, "tmp*.wav"),
56
  os.path.join(temp_dir, "generated_music_*.wav"),
57
  os.path.join(temp_dir, "musicgen_variant_*.wav")
58
  ]
59
+
60
  for pattern in patterns:
61
  for temp_file in glob.glob(pattern):
62
  try:
 
312
 
313
  batch_size = MODEL_CONFIG['batch_size'] if MODEL_CONFIG['batch_mode'] else 1
314
  prompts = [text_prompt] * batch_size
315
+
316
  start_time = time.time()
317
  outputs = generator(
318
  prompts,
 
325
 
326
  audio_variants = []
327
  sample_rate = outputs[0]['sampling_rate']
328
+
329
  # Create unique timestamp for this generation batch
330
  batch_timestamp = int(time.time() * 1000)
331
+
332
  for i, output in enumerate(outputs):
333
  audio_data = output['audio']
334
+
335
  print(f"[GENERATION] Processing variant {i + 1} audio shape: {audio_data.shape}")
336
+
337
  if hasattr(audio_data, 'cpu'):
338
  audio_data = audio_data.cpu().numpy()
339
 
340
  if len(audio_data.shape) == 3:
341
  audio_data = audio_data[0]
342
+
343
  if len(audio_data.shape) == 2:
344
  if audio_data.shape[0] < audio_data.shape[1]:
345
  audio_data = audio_data.T
 
347
  audio_data = audio_data[:, 0]
348
  else:
349
  audio_data = audio_data.flatten()
350
+
351
  audio_data = audio_data.flatten()
352
+
353
  max_val = np.max(np.abs(audio_data))
354
  if max_val > 0:
355
  audio_data = audio_data / max_val * 0.95
356
+
357
  audio_data = (audio_data * 32767).astype(np.int16)
358
+
359
  # Save each variant to a unique temporary file
360
+ temp_filename = f"musicgen_variant_{i + 1}_{batch_timestamp}.wav"
361
  temp_path = os.path.join(tempfile.gettempdir(), temp_filename)
362
+
363
  sf.write(temp_path, audio_data, sample_rate)
364
+
365
  print(f"[GENERATION] Variant {i + 1} saved to: {temp_path}")
366
  print(f"[GENERATION] Variant {i + 1} file size: {os.path.getsize(temp_path)} bytes")
367
+
368
  audio_variants.append(temp_path)
369
+
370
  print(f"[GENERATION] Variant {i + 1} final shape: {audio_data.shape}")
371
 
372
  while len(audio_variants) < 6:
373
  audio_variants.append(None)
374
 
 
 
375
  variants_text = "audio"
376
+ generation_info = f"✅ Generated {variants_text} in {generation_time:.2f}s\n"
377
+
378
  return audio_variants[0], audio_variants[1], audio_variants[2], audio_variants[3], audio_variants[4], \
379
  audio_variants[5], generation_info
380
 
 
432
  audio_output1 = gr.Audio(label="Variant 1", type="filepath", visible=actual_outputs >= 1)
433
  audio_output2 = gr.Audio(label="Variant 2", type="filepath", visible=actual_outputs >= 2)
434
  audio_outputs.extend([audio_output1, audio_output2])
435
+
436
  with gr.Row():
437
  audio_output3 = gr.Audio(label="Variant 3", type="filepath", visible=actual_outputs >= 3)
438
  audio_output4 = gr.Audio(label="Variant 4", type="filepath", visible=actual_outputs >= 4)
439
  audio_outputs.extend([audio_output3, audio_output4])
440
+
441
  with gr.Row():
442
  audio_output5 = gr.Audio(label="Variant 5", type="filepath", visible=actual_outputs >= 5)
443
  audio_output6 = gr.Audio(label="Variant 6", type="filepath", visible=actual_outputs >= 6)