fffiloni committed
Commit d385f3b · verified · 1 Parent(s): 406f22d

Create gradio_app.py

Files changed (1)
  1. gradio_app.py +48 -0
gradio_app.py ADDED
@@ -0,0 +1,48 @@
+ import os
+ import torch
+ import torchaudio
+ import gradio as gr
+ import look2hear.models
+
+ # Setup environment and model
+ os.environ["CUDA_VISIBLE_DEVICES"] = "2"
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+ model = look2hear.models.TIGERDNR.from_pretrained("JusperLee/TIGER-DnR", cache_dir="cache")
+ model.to(device)
+ model.eval()
+
+ # Processing function
+ def separate_audio(audio_file):
+     audio, sr = torchaudio.load(audio_file)
+     audio = audio.to(device)
+
+     with torch.no_grad():
+         all_target_dialog, all_target_effect, all_target_music = model(audio[None])
+
+     # Save outputs
+     dialog_path = "dialog_output.wav"
+     effect_path = "effect_output.wav"
+     music_path = "music_output.wav"
+
+     torchaudio.save(dialog_path, all_target_dialog.cpu(), sr)
+     torchaudio.save(effect_path, all_target_effect.cpu(), sr)
+     torchaudio.save(music_path, all_target_music.cpu(), sr)
+
+     return dialog_path, effect_path, music_path
+
+ # Gradio UI
+ demo = gr.Interface(
+     fn=separate_audio,
+     inputs=gr.Audio(type="filepath", label="Upload Audio File"),
+     outputs=[
+         gr.Audio(label="Dialog", type="filepath"),
+         gr.Audio(label="Effects", type="filepath"),
+         gr.Audio(label="Music", type="filepath")
+     ],
+     title="TIGER-DnR Audio Separator",
+     description="Upload a mixed audio file to separate it into dialog, effects, and music using the TIGER-DnR model."
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
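
Once this app is running, its endpoint can also be exercised programmatically. Below is a minimal sketch, not part of the commit, assuming gradio_client >= 1.0, an instance serving on Gradio's default local port, and a hypothetical input file mixture.wav; gr.Interface registers the wrapped function under the default /predict endpoint.

    from gradio_client import Client, handle_file

    # Assumes the app from this commit is running locally on the default port.
    client = Client("http://127.0.0.1:7860")

    # "mixture.wav" is a hypothetical input file. The three Audio outputs come
    # back as local file paths, mirroring the Interface's output order.
    dialog_path, effects_path, music_path = client.predict(
        handle_file("mixture.wav"),
        api_name="/predict",
    )
    print(dialog_path, effects_path, music_path)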