chong.zhang committed on
Commit b6363bb · 1 Parent(s): 5827423

update app.py

Files changed (1):
  1. app.py +5 -36
app.py CHANGED
@@ -28,12 +28,10 @@ sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))
 
 import spaces
 import gradio as gr
-from inspiremusic.cli.inference import InspireMusicUnified, set_env_variables
+from inspiremusic.cli.inference import InspireMusicModel, env_variables
 import torchaudio
 import datetime
 import hashlib
-import threading
-import time
 import importlib
 
 MODELS = ["InspireMusic-1.5B-Long", "InspireMusic-1.5B", "InspireMusic-Base", "InspireMusic-1.5B-24kHz", "InspireMusic-Base-24kHz"]
@@ -47,16 +45,6 @@ DEMO_TEXT_PROMPTS = ["Jazz music with drum beats.",
                      "The classical instrumental piece exudes a haunting and evocative atmosphere, characterized by its intricate guitar work and profound emotional depth.",
                      "Experience a dynamic blend of instrumental electronic music with futuristic house vibes, featuring energetic beats and a captivating rhythm. The tracks are likely instrumental, focusing on the immersive soundscapes rather than vocal performances."]
 
-# Shared flag to control the process
-stop_flag = threading.Event()
-
-def cancel_process():
-    """
-    Sets the stop_flag to stop the long-running process.
-    """
-    stop_flag.set()
-    return "Cancellation requested. Please wait for the process to stop."
-
 def generate_filename():
     hash_object = hashlib.sha256(str(int(datetime.datetime.now().timestamp())).encode())
     hash_string = hash_object.hexdigest()
@@ -116,8 +104,8 @@ def trim_audio(audio_file, cut_seconds=5):
 
 @spaces.GPU(duration=120)
 def music_generation(args):
-    set_env_variables()
-    model = InspireMusicUnified(
+    env_variables()
+    model = InspireMusicModel(
         model_name=args["model_name"],
         model_dir=args["model_dir"],
         min_generate_audio_seconds=args["min_generate_audio_seconds"],
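Note: for context, here is the renamed entry point end to end. This is a minimal sketch assuming the constructor keeps the keyword arguments visible in this hunk; the concrete values, and any additional required parameters, are assumptions, since the full InspireMusicModel signature is not part of this diff.

```python
# Sketch only: mirrors the call pattern in the hunk above.
# Values are illustrative; the real app builds them from UI inputs via args.
from inspiremusic.cli.inference import InspireMusicModel, env_variables

env_variables()  # replaces the old set_env_variables() helper
model = InspireMusicModel(
    model_name="InspireMusic-1.5B-Long",                   # one of MODELS
    model_dir="pretrained_models/InspireMusic-1.5B-Long",  # hypothetical path
    min_generate_audio_seconds=10.0,                       # hypothetical value
)
```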
@@ -163,18 +151,6 @@ def demo_inspiremusic_con(text, audio, model_name, chorus,
                           max_generate_audio_seconds=max_generate_audio_seconds)
     return music_generation(args)
 
-def process(args, progress=gr.Progress()):
-    progress(0, desc="Starting process...")
-    idx = 1
-    for i in range(idx):
-        if stop_flag.is_set():
-            progress(i / idx, desc="Process canceled.")
-            break
-        music_generation(args)
-        time.sleep(1)
-        progress((i + 1) / idx, desc=f"Processing step {i + 1}/{idx}")
-    return "Process completed successfully."
-
 def main():
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Markdown("""
@@ -209,7 +185,7 @@ def main():
         music_output = gr.Audio(label="Generated Music", type="filepath", autoplay=True, show_download_button = True)
 
         with gr.Row():
-            button = gr.Button("Submit Text-to-Music Task")
+            button = gr.Button("Start Text-to-Music Task")
             button.click(demo_inspiremusic_t2m,
                          inputs=[text_input, model_name,
                                  chorus,
@@ -217,20 +193,13 @@ def main():
                                  max_generate_audio_seconds],
                          outputs=music_output)
 
-            generate_button = gr.Button("Submit Music Continuation Task")
+            generate_button = gr.Button("Start Music Continuation Task")
             generate_button.click(demo_inspiremusic_con,
                                   inputs=[text_input, audio_input, model_name,
                                           chorus,
                                           output_sample_rate,
                                           max_generate_audio_seconds],
                                   outputs=music_output)
-            cancel_button = gr.Button("Cancel")
-
-            cancel_button.click(
-                fn=cancel_process,
-                inputs=[],
-                outputs="Cancel process."
-            )
         t2m_examples = gr.Examples(examples=DEMO_TEXT_PROMPTS, inputs=[text_input])
         demo.launch()
 
205