fffiloni committed
Commit 6b4c0c8 · verified · 1 Parent(s): 636f4a7

add ACE Step model

Files changed (1)
app.py +46 -0
app.py CHANGED
@@ -47,6 +47,12 @@ def check_api(model_name):
             return "api ready"
         except:
             return "api not ready yet"
+    elif model_name == "ACE Step":
+        try :
+            client = Client("ACE-Step/ACE-Step")
+            return "api ready"
+        except :
+            return "api not ready yet"
 
 
 from moviepy.editor import VideoFileClip
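Note: the new branch mirrors the readiness pattern used for the other models in this file: constructing a gradio_client.Client for the Space and treating any exception as "not ready". A minimal standalone sketch of the same probe, assuming gradio_client is installed and the public ACE-Step/ACE-Step Space exists:

from gradio_client import Client

# Probe the Space the way check_api does: Client() connects on
# construction and raises if the Space is unreachable or still starting.
try:
    Client("ACE-Step/ACE-Step")
    print("api ready")
except Exception:
    print("api not ready yet")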
 
@@ -185,6 +191,39 @@ def get_stable_audio_open(prompt):
     print(result)
     return result
 
+def get_ace(prompt):
+    from gradio_client import Client, handle_file
+
+    client = Client("ACE-Step/ACE-Step")
+    result = client.predict(
+        audio_duration=-1,
+        prompt=prompt,
+        lyrics="[inst]",
+        infer_step=60,
+        guidance_scale=15,
+        scheduler_type="euler",
+        cfg_type="apg",
+        omega_scale=10,
+        manual_seeds=None,
+        guidance_interval=0.5,
+        guidance_interval_decay=0,
+        min_guidance_scale=3,
+        use_erg_tag=True,
+        use_erg_lyric=False,
+        use_erg_diffusion=True,
+        oss_steps=None,
+        guidance_scale_text=0,
+        guidance_scale_lyric=0,
+        audio2audio_enable=False,
+        ref_audio_strength=0.5,
+        ref_audio_input=None,
+        lora_name_or_path="none",
+        api_name="/__call__"
+    )
+    print(result)
+    return result[0]
+
+
 import re
 import torch
 from transformers import pipeline
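Note: client.predict(..., api_name="/__call__") returns the endpoint's outputs as a tuple, and get_ace forwards only result[0], the generated audio file that gradio_client downloads locally; handle_file is imported but ends up unused here since ref_audio_input stays None. A hedged sketch of calling the helper standalone (app.py builds its Gradio UI at import time, so in practice you would copy the function out; the prompt is a made-up example):

from app import get_ace

# lyrics="[inst]" appears to request an instrumental take, so the text
# prompt alone drives the music; audio_duration=-1 presumably lets the
# Space choose the clip length.
audio_path = get_ace("uplifting orchestral theme, strings and brass")
print("generated audio:", audio_path)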
 
@@ -272,6 +311,9 @@ def infer(image_in, chosen_model, api_status):
     elif chosen_model == "Stable Audio Open" :
         gr.Info("Now calling Stable Audio Open for music...")
         music_o = get_stable_audio_open(musical_prompt)
+    elif chosen_model == "ACE Step" :
+        gr.Info("Now calling ACE Step for music...")
+        music_o = get_ace(musical_prompt)
 
     return gr.update(value=musical_prompt, interactive=True), gr.update(visible=True), music_o
 
 
@@ -297,6 +339,9 @@ def retry(chosen_model, caption):
     elif chosen_model == "Stable Audio Open" :
         gr.Info("Now calling Stable Audio Open for music...")
         music_o = get_stable_audio_open(musical_prompt)
+    elif chosen_model == "ACE Step" :
+        gr.Info("Now calling ACE Step for music...")
+        music_o = get_ace(musical_prompt)
 
     return music_o
 
 
@@ -341,6 +386,7 @@ with gr.Blocks(css=css) as demo:
             label = "Choose a model",
            choices = [
                #"MAGNet",
+               "ACE Step",
                "AudioLDM-2",
                "Riffusion",
                "Mustango",