Upload app.py
app.py
ADDED
@@ -0,0 +1,26 @@
import gradio as gr
import torchaudio
from speechbrain.pretrained import SepformerSeparation as separator

# Load the pretrained SepFormer enhancement model
# (assumed checkpoint: speechbrain/sepformer-wham-enhancement, which matches the
#  8 kHz "enhanced_wham.wav" output used below)
model = separator.from_hparams(source="speechbrain/sepformer-wham-enhancement",
                               savedir="pretrained_models/sepformer-wham-enhancement")

def predict_song(audio_path):
    # Enhance the uploaded audio and save the first estimated source as a wav file
    est_sources = model.separate_file(path=audio_path)
    torchaudio.save("enhanced_wham.wav", est_sources[:, :, 0].detach().cpu(), 8000)
    return "enhanced_wham.wav"

# Create title, description and article strings
title = "Denoise Audio Using Sepformer"
description = "Using the SepFormer model implemented with SpeechBrain"
article = "See Hugging Face [speechbrain/sepformer-wsj02mix](https://huggingface.co/speechbrain/sepformer-wsj02mix)."

# Create the Gradio demo
demo = gr.Interface(fn=predict_song,                       # mapping function from input to output
                    inputs=gr.Audio(type="filepath"),      # uploaded or recorded audio, passed as a file path
                    outputs=gr.File(file_types=[".wav"]),  # our fn returns a single denoised .wav file
                    title=title,
                    description=description,
                    article=article)

# Launch the demo!
demo.launch()
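
The same SepFormer call can be exercised directly, outside the Gradio interface, to check that the checkpoint downloads and produces an 8 kHz output; the input file name below is a placeholder:

# Standalone sketch of the enhancement step, assuming a local noisy recording
# saved as "noisy_sample.wav" (placeholder name)
import torchaudio
from speechbrain.pretrained import SepformerSeparation as separator

model = separator.from_hparams(source="speechbrain/sepformer-wham-enhancement",
                               savedir="pretrained_models/sepformer-wham-enhancement")
est_sources = model.separate_file(path="noisy_sample.wav")
torchaudio.save("denoised_sample.wav", est_sources[:, :, 0].detach().cpu(), 8000)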