Spaces: Running on Zero
File size: 1,490 Bytes
import gradio as gr
import torchaudio
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write
import spaces


@spaces.GPU(duration=120)
def generate_music(description, melody_audio):
    # Load the pretrained MusicGen checkpoint and cap each clip at 8 seconds.
    model = MusicGen.get_pretrained('nateraw/musicgen-songstarter-v0.2')
    model.set_generation_params(duration=8)

    if description:
        description = [description]
        if melody_audio:
            # Condition on both the text prompt and the melody's chroma features.
            melody, sr = torchaudio.load(melody_audio)
            wav = model.generate_with_chroma(description, melody[None], sr)
        else:
            # Text-only conditioning.
            wav = model.generate(description)
    else:
        # No prompt at all: sample a single unconditional clip.
        wav = model.generate_unconditional(1)

    # audio_write takes a stem name and appends the extension itself,
    # so write to 'output' and return the resulting 'output.wav' path.
    audio_write('output', wav[0].cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True)
    return 'output.wav'
description = gr.Textbox(label="Description", placeholder="acoustic, guitar, melody, trap, d minor, 90 bpm")
melody_audio = gr.Audio(label="Melody Audio (optional)", type="filepath")
output_audio = gr.Audio(label="Generated Music", type="filepath")
gr.Interface(
    fn=generate_music,
    inputs=[description, melody_audio],
    outputs=output_audio,
    title="MusicGen Demo",
    description="Generate music using the MusicGen model.",
    examples=[
        ["trap, synthesizer, songstarters, dark, G# minor, 140 bpm", "./assets/kalhonaho.mp3"],
        ["upbeat, electronic, synth, dance, 120 bpm", None]
    ]
).launch()
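
# A minimal local test (not part of the original Space, shown only as a sketch):
# generate_music can be called directly, bypassing the Gradio UI. This assumes
# audiocraft, torchaudio, and a CUDA GPU are available, and that the .launch()
# call above is commented out so importing the file does not start the server.
#
#   path = generate_music("acoustic, guitar, melody, trap, d minor, 90 bpm", None)
#   print(path)  # -> output.wav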