import gradio as gr
import os
from audio_separator.separator import Separator
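
# Gradio front end for python-audio-separator: takes an uploaded song and returns
# three stems (main vocal, backing vocal, instrumental) produced with UVR MDX-Net models.
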
def separate_audio(input_file, output_dir, model_name):
    # Create output directory if it doesn't exist
    os.makedirs(output_dir, exist_ok=True)

    # Initialize the Separator
    separator = Separator()

    # Separate the audio
    separator.separate_audio_file(
        input_file,
        output_dir,
        model_name=model_name,
        denoise=True,
        output_format='wav',
        normalization_threshold=0.9,
        mdx_segment_size=256,
        mdx_overlap=8,
        primary_stem_only=False
    )

    # Rename the output files to match the requested format
    os.rename(os.path.join(output_dir, 'Vocals.wav'), os.path.join(output_dir, '1_main_vocal.wav'))
    os.rename(os.path.join(output_dir, 'Other.wav'), os.path.join(output_dir, '2_backing_vocal.wav'))
    os.rename(os.path.join(output_dir, 'Instrumental.wav'), os.path.join(output_dir, '3_instrumental.wav'))

    return [
        os.path.join(output_dir, '1_main_vocal.wav'),
        os.path.join(output_dir, '2_backing_vocal.wav'),
        os.path.join(output_dir, '3_instrumental.wav')
    ]
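
# Gradio callback: run separation on the uploaded file and return the three stem paths.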
def process_audio(audio_file, model_name):
    output_dir = "output"
    # gr.Audio(type="filepath") passes a plain filepath string, so use it directly
    return separate_audio(audio_file, output_dir, model_name)

# Define the Gradio interface
iface = gr.Blocks()

with iface:
    gr.Markdown("# Audio Separator")

    with gr.Row():
        with gr.Column():
            audio_input = gr.Audio(type="filepath", label="Input Audio")
            model_name = gr.Dropdown(
                choices=["UVR-MDX-NET-Inst_HQ_3", "UVR_MDXNET_KARA_2", "UVR-MDX-NET-Inst_HQ_4"],
                label="Model",
                value="UVR-MDX-NET-Inst_HQ_3"
            )
            submit_btn = gr.Button("Separate Audio")

        with gr.Column():
            vocal_output = gr.Audio(label="Main Vocal")
            backing_vocal_output = gr.Audio(label="Backing Vocal")
            instrumental_output = gr.Audio(label="Instrumental")

    # Wire the button to the separation callback
    submit_btn.click(
        process_audio,
        inputs=[audio_input, model_name],
        outputs=[vocal_output, backing_vocal_output, instrumental_output]
    )

iface.launch()