import torch
import torchaudio
from einops import rearrange
import gradio as gr
import spaces
import os
import uuid

from stable_audio_tools import get_pretrained_model
from stable_audio_tools.inference.generation import generate_diffusion_cond
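

# The model and its config are created once at startup (load_model() is called at
# the bottom of this file) and reused as module-level globals by generate_audio().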
def load_model():
    print("Loading model...")
    model, model_config = get_pretrained_model("sonalkum/synthio-stable-audio-open")
    print("Model loaded successfully.")
    return model, model_config
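

# @spaces.GPU requests a ZeroGPU worker for up to 120 seconds per call when this
# app runs as a Hugging Face Space; outside of Spaces the decorator is a no-op.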
@spaces.GPU(duration=120)
def generate_audio(prompt, seconds_total=30, steps=100, cfg_scale=7):
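    """Generate an audio clip from a text prompt and return the path to a WAV file."""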
    print(f"Prompt received: {prompt}")
    print(f"Settings: Duration={seconds_total}s, Steps={steps}, CFG Scale={cfg_scale}")

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")
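
    # The Hugging Face token is read from the environment (e.g. a Space secret).
    # It is only checked for presence here; the checkpoint itself was already
    # downloaded at startup by load_model().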
    hf_token = os.getenv("HF_TOKEN")
    print(f"Hugging Face token found: {hf_token is not None}")
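
    # Sample rate and sample size (window length in samples) come from the model
    # config; Stable Audio Open produces 44.1 kHz stereo audio.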
    sample_rate = model_config["sample_rate"]
    sample_size = model_config["sample_size"]
    print(f"Sample rate: {sample_rate}, Sample size: {sample_size}")

    # nn.Module.to() moves the model's parameters in place; avoid rebinding the
    # name `model` here, which would shadow the module-level model and raise
    # UnboundLocalError.
    model.to(device)
    print("Model moved to device.")
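
    # Conditioning for text-to-audio generation: the prompt plus the requested
    # start offset and total duration in seconds.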
    conditioning = [{
        "prompt": prompt,
        "seconds_start": 0,
        "seconds_total": seconds_total
    }]
    print(f"Conditioning: {conditioning}")
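
    # Sample from the conditioned diffusion model with the DPM++ 3M SDE sampler.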
    print("Generating audio...")
    output = generate_diffusion_cond(
        model,
        steps=steps,
        cfg_scale=cfg_scale,
        conditioning=conditioning,
        sample_size=sample_size,
        sigma_min=0.3,
        sigma_max=500,
        sampler_type="dpmpp-3m-sde",
        device=device
    )
    print("Audio generated.")
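
    # Fold the batch dimension into the time axis:
    # (batch, channels, samples) -> (channels, batch * samples).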
    output = rearrange(output, "b d n -> d (b n)")
    print("Audio rearranged.")
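
    # Peak-normalize to [-1, 1] and convert to 16-bit PCM so the result can be
    # written as a standard WAV file.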
    output = (
        output.to(torch.float32)
        .div(torch.max(torch.abs(output)))
        .clamp(-1, 1)
        .mul(32767)
        .to(torch.int16)
        .cpu()
    )
    print("Audio normalized and converted.")
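
    # Write to a uniquely named WAV file in the working directory; Gradio serves
    # the returned file path through the Audio output component.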
    unique_filename = f"output_{uuid.uuid4().hex}.wav"
    print(f"Saving audio to file: {unique_filename}")

    torchaudio.save(unique_filename, output, sample_rate)
    print(f"Audio saved: {unique_filename}")

    return unique_filename
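

# Gradio UI: a text prompt plus sliders for clip duration, diffusion steps, and CFG scale.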
interface = gr.Interface(
    fn=generate_audio,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your text prompt here"),
        gr.Slider(0, 10, value=5, label="Duration in Seconds"),
        gr.Slider(10, 150, value=100, step=10, label="Number of Diffusion Steps"),
        gr.Slider(1, 10, value=7, step=0.1, label="CFG Scale")
    ],
    outputs=gr.Audio(type="filepath", label="Generated Audio"),
    title="Synthio Stable Audio Generator",
    description="Generate variable-length stereo audio at 44.1kHz from text prompts using Synthio's Stable Audio Open 1.0."
)
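
# Load the model and config once at startup; generate_audio() reads these globals on each request.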
model, model_config = load_model()

interface.launch()