Spaces: Running on Zero
chong.zhang committed · commit 698dade
Parent(s): a55649b
update

app.py CHANGED
@@ -114,7 +114,16 @@ def cut_audio(audio_file, cut_seconds=5):
     torchaudio.save(output_path, cutted_audio, sr)
     return output_path
 
-def run_inspiremusic(task, text, audio, model_name, chorus, fast, fade_out,
+def run_text2music(task, text, model_name, chorus, fast, fade_out,
+                   output_sample_rate, max_generate_audio_seconds):
+    args = get_args(
+        task=task, text=text, audio=None,
+        model_name=model_name, chorus=chorus, fast=fast,
+        fade_out=fade_out, output_sample_rate=output_sample_rate,
+        max_generate_audio_seconds=max_generate_audio_seconds)
+    return music_generation(args)
+
+def run_continuation(task, text, audio, model_name, chorus, fast, fade_out,
                      output_sample_rate, max_generate_audio_seconds):
     args = get_args(
         task=task, text=text, audio=cut_audio(audio, cut_seconds=5),
@@ -161,21 +170,29 @@ with gr.Blocks() as demo:
         default_prompt_buttons = []
         for prompt in default_prompts:
             button = gr.Button(value=prompt)
-            button.click(
-
-
-
-
-
+            button.click(run_text2music,
+                         inputs = [task, text_input, None, model_name,
+                                   chorus, fast, fade_out,
+                                   output_sample_rate,
+                                   max_generate_audio_seconds],
+                         outputs = music_output)
             default_prompt_buttons.append(button)
-
-
-
-            generate_button.click(run_inspiremusic,
+        with gr.Row():
+            generate_button = gr.Button("Music Continuation")
+            generate_button.click(run_continuation,
                                   inputs=[task, text_input, audio_input, model_name,
                                           chorus, fast, fade_out,
                                           output_sample_rate,
                                           max_generate_audio_seconds],
                                   outputs=music_output)
+            button = gr.Button("Text to Music")
+
+            button.click(run_text2music,
+                         inputs=[task, text_input, model_name,
+                                 chorus, fast, fade_out,
+                                 output_sample_rate,
+                                 max_generate_audio_seconds],
+                         outputs=music_output)
+
 
 demo.launch()
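For reference, the sketch below shows the wiring pattern this commit moves to: one Gradio handler per task, each bound to its own button, with an inputs list that matches that handler's signature. It is a minimal, self-contained sketch, not the Space's actual app.py: the stub handlers stand in for run_text2music and run_continuation (which in the real file build args with get_args and call music_generation / cut_audio), and the component names, labels, and model list here are placeholder assumptions.

import gradio as gr

# Stub handlers standing in for the Space's run_text2music / run_continuation,
# which in app.py build args via get_args(...) and call music_generation(args).
def run_text2music(text, model_name):
    # Text-to-music has no audio prompt; app.py passes audio=None to get_args.
    return None  # placeholder for the path of a generated audio file

def run_continuation(text, audio, model_name):
    # Continuation trims the prompt audio first in app.py
    # (cut_audio(audio, cut_seconds=5)) before generating.
    return audio  # placeholder: echo the prompt audio back

with gr.Blocks() as demo:
    text_input = gr.Textbox(label="Text prompt")
    audio_input = gr.Audio(label="Prompt audio", type="filepath")
    model_name = gr.Dropdown(choices=["InspireMusic-Base"],
                             value="InspireMusic-Base", label="Model")
    music_output = gr.Audio(label="Generated music")

    with gr.Row():
        t2m_button = gr.Button("Text to Music")
        continuation_button = gr.Button("Music Continuation")

    # Each button gets its own handler; the inputs list maps one-to-one onto
    # the handler's parameters, so the text-to-music binding has no audio_input.
    t2m_button.click(run_text2music,
                     inputs=[text_input, model_name],
                     outputs=music_output)
    continuation_button.click(run_continuation,
                              inputs=[text_input, audio_input, model_name],
                              outputs=music_output)

demo.launch()

The benefit of splitting the old single run_inspiremusic entry point into run_text2music and run_continuation is that each click binding's inputs list can line up with exactly one signature; since run_text2music takes no audio argument, its bindings need no audio component (and no None placeholder) in the inputs list.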