ginipick committed (verified)
Commit 2515553 · Parent(s): 4fb8e24

Create app-backup.py

Files changed (1):
  1. app-backup.py +527 -0
app-backup.py ADDED
@@ -0,0 +1,527 @@
+ import gradio as gr
+ import subprocess
+ import os
+ import shutil
+ import tempfile
+ import torch
+ import logging
+ import numpy as np
+ import re
+ from concurrent.futures import ThreadPoolExecutor
+ from functools import lru_cache
+
+ # Logging setup
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(levelname)s - %(message)s',
+     handlers=[
+         logging.FileHandler('yue_generation.log'),
+         logging.StreamHandler()
+     ]
+ )
+
+ # Lyrics analysis function
+ def analyze_lyrics(lyrics):
+     # Split into non-empty lines
+     lines = [line.strip() for line in lyrics.split('\n') if line.strip()]
+
+     # Section counters
+     sections = {
+         'verse': 0,
+         'chorus': 0,
+         'bridge': 0,
+         'total_lines': len(lines)
+     }
+
+     current_section = None
+     section_lines = {
+         'verse': 0,
+         'chorus': 0,
+         'bridge': 0
+     }
+
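+     # Walk the lines, tracking the current [verse]/[chorus]/[bridge] tag and counting lyric lines per section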
+     for line in lines:
+         lower_line = line.lower()
+         if '[verse]' in lower_line:
+             current_section = 'verse'
+             sections['verse'] += 1
+         elif '[chorus]' in lower_line:
+             current_section = 'chorus'
+             sections['chorus'] += 1
+         elif '[bridge]' in lower_line:
+             current_section = 'bridge'
+             sections['bridge'] += 1
+         elif current_section and line.strip():
+             section_lines[current_section] += 1
+
+     # Total number of sections
+     total_sections = sections['verse'] + sections['chorus'] + sections['bridge']
+
+     return sections, total_sections, len(lines), section_lines
+
+ def calculate_generation_params(lyrics):
+     sections, total_sections, total_lines, section_lines = analyze_lyrics(lyrics)
+
+     # Base timing per line (in seconds)
+     time_per_line = {
+         'verse': 4,   # 4 seconds per verse line
+         'chorus': 6,  # 6 seconds per chorus line
+         'bridge': 5   # 5 seconds per bridge line
+     }
+
+     # Estimated duration for each section
+     section_durations = {
+         'verse': section_lines['verse'] * time_per_line['verse'],
+         'chorus': section_lines['chorus'] * time_per_line['chorus'],
+         'bridge': section_lines['bridge'] * time_per_line['bridge']
+     }
+
+     total_duration = sum(section_durations.values())
+     total_duration = max(60, total_duration)  # at least 60 seconds
+
+     # Token budget (conservative values)
+     base_tokens = 3000      # base token count
+     tokens_per_line = 200   # tokens per lyric line
+
+     total_tokens = base_tokens + (total_lines * tokens_per_line)
+
+     # Number of segments, based on the detected sections
+     if sections['chorus'] > 0:
+         num_segments = 3  # 3 segments when a chorus is present
+     else:
+         num_segments = 2  # 2 segments when there is no chorus
+
+     # Cap the token count
+     max_tokens = min(8000, total_tokens)  # limit to at most 8000 tokens
+
+     return {
+         'max_tokens': max_tokens,
+         'num_segments': num_segments,
+         'sections': sections,
+         'section_lines': section_lines,
+         'estimated_duration': total_duration,
+         'section_durations': section_durations,
+         'has_chorus': sections['chorus'] > 0
+     }
+
+ def get_audio_duration(file_path):
+     try:
+         import librosa
+         duration = librosa.get_duration(path=file_path)
+         return duration
+     except Exception as e:
+         logging.error(f"Failed to get audio duration: {e}")
+         return None
+
+ # Language detection and model selection
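+ # Korean and Japanese text both map to the shared jp-kr checkpoint; Chinese and English use dedicated ones.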
+ def detect_and_select_model(text):
+     if re.search(r'[\u3131-\u318E\uAC00-\uD7A3]', text):  # Korean (Hangul)
+         return "m-a-p/YuE-s1-7B-anneal-jp-kr-cot"
+     elif re.search(r'[\u4e00-\u9fff]', text):  # Chinese
+         return "m-a-p/YuE-s1-7B-anneal-zh-cot"
+     elif re.search(r'[\u3040-\u309F\u30A0-\u30FF]', text):  # Japanese
+         return "m-a-p/YuE-s1-7B-anneal-jp-kr-cot"
+     else:  # English / other
+         return "m-a-p/YuE-s1-7B-anneal-en-cot"
+
+
+
+ # GPU settings optimization
+ def optimize_gpu_settings():
+     if torch.cuda.is_available():
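+         # TF32 matmul and cuDNN autotuning trade strict determinism for throughput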
+         torch.backends.cuda.matmul.allow_tf32 = True
+         torch.backends.cudnn.benchmark = True
+         torch.backends.cudnn.deterministic = False
+         torch.backends.cudnn.enabled = True
+
+         torch.cuda.empty_cache()
+         torch.cuda.set_device(0)
+
+         logging.info(f"Using GPU: {torch.cuda.get_device_name(0)}")
+         logging.info(f"Available GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
+     else:
+         logging.warning("GPU not available!")
+
+ def install_flash_attn():
+     try:
+         if not torch.cuda.is_available():
+             logging.warning("GPU not available, skipping flash-attn installation")
+             return False
+
+         cuda_version = torch.version.cuda
+         if cuda_version is None:
+             logging.warning("CUDA not available, skipping flash-attn installation")
+             return False
+
+         logging.info(f"Detected CUDA version: {cuda_version}")
+
+         try:
+             import flash_attn
+             logging.info("flash-attn already installed")
+             return True
+         except ImportError:
+             logging.info("Installing flash-attn...")
+
+             try:
+                 subprocess.run(
+                     ["pip", "install", "flash-attn", "--no-build-isolation"],
+                     check=True,
+                     capture_output=True
+                 )
+                 logging.info("flash-attn installed successfully!")
+                 return True
+             except subprocess.CalledProcessError:
+                 logging.warning("Failed to install flash-attn via pip, skipping...")
+                 return False
+
+     except Exception as e:
+         logging.warning(f"Failed to install flash-attn: {e}")
+         return False
+
+ def initialize_system():
+     optimize_gpu_settings()
+     has_flash_attn = install_flash_attn()
+
+     from huggingface_hub import snapshot_download
+
+     folder_path = './inference/xcodec_mini_infer'
+     os.makedirs(folder_path, exist_ok=True)
+     logging.info(f"Created folder at: {folder_path}")
+
+     snapshot_download(
+         repo_id="m-a-p/xcodec_mini_infer",
+         local_dir="./inference/xcodec_mini_infer",
+         resume_download=True
+     )
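+     # infer.py is run from ./inference, next to the downloaded xcodec_mini_infer assets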
+
+     try:
+         os.chdir("./inference")
+         logging.info(f"Working directory changed to: {os.getcwd()}")
+     except FileNotFoundError as e:
+         logging.error(f"Directory error: {e}")
+         raise
+
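+ # Cache temp-file paths keyed by (content_hash, prefix); assumes the hash uniquely identifies the content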
+ @lru_cache(maxsize=50)
+ def get_cached_file_path(content_hash, prefix):
+     return create_temp_file(content_hash, prefix)
+
+ def empty_output_folder(output_dir):
+     try:
+         shutil.rmtree(output_dir)
+         os.makedirs(output_dir)
+         logging.info(f"Output folder cleaned: {output_dir}")
+     except Exception as e:
+         logging.error(f"Error cleaning output folder: {e}")
+         raise
+
+ def create_temp_file(content, prefix, suffix=".txt"):
+     temp_file = tempfile.NamedTemporaryFile(delete=False, mode="w", prefix=prefix, suffix=suffix)
+     content = content.strip() + "\n\n"
+     content = content.replace("\r\n", "\n").replace("\r", "\n")
+     temp_file.write(content)
+     temp_file.close()
+     logging.debug(f"Temporary file created: {temp_file.name}")
+     return temp_file.name
+
+ def get_last_mp3_file(output_dir):
+     mp3_files = [f for f in os.listdir(output_dir) if f.endswith('.mp3')]
+     if not mp3_files:
+         logging.warning("No MP3 files found")
+         return None
+
+     mp3_files_with_path = [os.path.join(output_dir, f) for f in mp3_files]
+     mp3_files_with_path.sort(key=os.path.getmtime, reverse=True)
+     return mp3_files_with_path[0]
+
+ def optimize_model_selection(lyrics, genre):
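+     # The per-model configs below share the same token/segment budget; only sampling temperature differs (0.8 for EN, 0.7 for JP/KR and ZH)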
+     model_path = detect_and_select_model(lyrics)
+     params = calculate_generation_params(lyrics)
+
+     # Adjust settings based on whether a chorus is present
+     has_chorus = params['sections']['chorus'] > 0
+
+     # Tokens per segment
+     tokens_per_segment = params['max_tokens'] // params['num_segments']
+
+     model_config = {
+         "m-a-p/YuE-s1-7B-anneal-en-cot": {
+             "max_tokens": params['max_tokens'],
+             "temperature": 0.8,
+             "batch_size": 8,
+             "num_segments": params['num_segments'],
+             "estimated_duration": params['estimated_duration']
+         },
+         "m-a-p/YuE-s1-7B-anneal-jp-kr-cot": {
+             "max_tokens": params['max_tokens'],
+             "temperature": 0.7,
+             "batch_size": 8,
+             "num_segments": params['num_segments'],
+             "estimated_duration": params['estimated_duration']
+         },
+         "m-a-p/YuE-s1-7B-anneal-zh-cot": {
+             "max_tokens": params['max_tokens'],
+             "temperature": 0.7,
+             "batch_size": 8,
+             "num_segments": params['num_segments'],
+             "estimated_duration": params['estimated_duration']
+         }
+     }
+
+     # Increase the token budget when a chorus is present
+     if has_chorus:
+         for config in model_config.values():
+             config['max_tokens'] = int(config['max_tokens'] * 1.5)  # allocate 50% more tokens
+
+     return model_path, model_config[model_path], params
+
+ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
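+     # Note: the num_segments / max_new_tokens values coming from the UI are display-only;
+     # the actual values are recomputed from the lyrics below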
+     genre_txt_path = None
+     lyrics_txt_path = None
+
+     try:
+         # Model selection and configuration
+         model_path, config, params = optimize_model_selection(lyrics_txt_content, genre_txt_content)
+         logging.info(f"Selected model: {model_path}")
+         logging.info(f"Lyrics analysis: {params}")
+
+         # Check for chorus sections and log the estimate
+         has_chorus = params['sections']['chorus'] > 0
+         estimated_duration = params.get('estimated_duration', 90)
+
+
+         # Adjust token and segment counts
+         if has_chorus:
+             actual_max_tokens = min(8000, int(config['max_tokens'] * 1.2))  # 20% more, capped at 8000
+             actual_num_segments = 3
+         else:
+             actual_max_tokens = config['max_tokens']
+             actual_num_segments = 2
+
+
+
+         logging.info(f"Estimated duration: {estimated_duration} seconds")
+         logging.info(f"Has chorus sections: {has_chorus}")
+         logging.info(f"Using segments: {actual_num_segments}, tokens: {actual_max_tokens}")
+
+         # Create temporary input files
+         genre_txt_path = create_temp_file(genre_txt_content, prefix="genre_")
+         lyrics_txt_path = create_temp_file(lyrics_txt_content, prefix="lyrics_")
+
+         output_dir = "./output"
+         os.makedirs(output_dir, exist_ok=True)
+         empty_output_folder(output_dir)
+         # Build the base command
+         command = [
+             "python", "infer.py",
+             "--stage1_model", model_path,
+             "--stage2_model", "m-a-p/YuE-s2-1B-general",
+             "--genre_txt", genre_txt_path,
+             "--lyrics_txt", lyrics_txt_path,
+             "--run_n_segments", str(actual_num_segments),
+             "--stage2_batch_size", "4",  # reduced batch size
+             "--output_dir", output_dir,
+             "--cuda_idx", "0",
+             "--max_new_tokens", str(actual_max_tokens)
+         ]
+
+         # GPU settings
+         if torch.cuda.is_available():
+             command.append("--disable_offload_model")
+
+
+
+         # CUDA environment variables
+         env = os.environ.copy()
+         if torch.cuda.is_available():
+             env.update({
+                 "CUDA_VISIBLE_DEVICES": "0",
+                 "CUDA_HOME": "/usr/local/cuda",
+                 "PATH": f"/usr/local/cuda/bin:{env.get('PATH', '')}",
+                 "LD_LIBRARY_PATH": f"/usr/local/cuda/lib64:{env.get('LD_LIBRARY_PATH', '')}",
+                 "PYTORCH_CUDA_ALLOC_CONF": "max_split_size_mb:512"
+             })
+
+         # Handle transformers cache migration
+         try:
+             from transformers.utils import move_cache
+             move_cache()
+         except Exception as e:
+             logging.warning(f"Cache migration warning (non-critical): {e}")
+
+         # Run the inference command
+         process = subprocess.run(
+             command,
+             env=env,
+             check=False,
+             capture_output=True,
+             text=True
+         )
+
+         # Log execution results
+         logging.info(f"Command output: {process.stdout}")
+         if process.stderr:
+             logging.error(f"Command error: {process.stderr}")
+
+         if process.returncode != 0:
+             logging.error(f"Command failed with return code: {process.returncode}")
+             logging.error(f"Command: {' '.join(command)}")
+             raise RuntimeError(f"Inference failed: {process.stderr}")
+
+         # Process the result
+         last_mp3 = get_last_mp3_file(output_dir)
+         if last_mp3:
+             try:
+                 duration = get_audio_duration(last_mp3)
+                 logging.info(f"Generated audio file: {last_mp3}")
+                 if duration:
+                     logging.info(f"Audio duration: {duration:.2f} seconds")
+                     logging.info(f"Expected duration: {estimated_duration} seconds")
+
+                     # Warn when the generated audio is much shorter than expected
+                     if duration < estimated_duration * 0.8:
+                         logging.warning(f"Generated audio is shorter than expected: {duration:.2f}s < {estimated_duration:.2f}s")
+             except Exception as e:
+                 logging.warning(f"Failed to get audio duration: {e}")
+             return last_mp3
+         else:
+             logging.warning("No output audio file generated")
+             return None
+
+     except Exception as e:
+         logging.error(f"Inference error: {e}")
+         raise
+     finally:
+         # Clean up temporary files
+         if genre_txt_path and os.path.exists(genre_txt_path):
+             try:
+                 os.remove(genre_txt_path)
+                 logging.debug(f"Removed temporary file: {genre_txt_path}")
+             except Exception as e:
+                 logging.warning(f"Failed to remove temporary file {genre_txt_path}: {e}")
+
+         if lyrics_txt_path and os.path.exists(lyrics_txt_path):
+             try:
+                 os.remove(lyrics_txt_path)
+                 logging.debug(f"Removed temporary file: {lyrics_txt_path}")
+             except Exception as e:
+                 logging.warning(f"Failed to remove temporary file {lyrics_txt_path}: {e}")
+
+ def main():
+     # Gradio interface
+     with gr.Blocks() as demo:
+         with gr.Column():
+             gr.Markdown("# Open SUNO: Full-Song Generation (Multi-Language Support)")
+
+
+             with gr.Row():
+                 with gr.Column():
+                     genre_txt = gr.Textbox(
+                         label="Genre",
+                         placeholder="Enter music genre and style descriptions..."
+                     )
+                     lyrics_txt = gr.Textbox(
+                         label="Lyrics (Supports English, Korean, Japanese, Chinese)",
+                         placeholder="Enter song lyrics with [verse], [chorus], [bridge] tags...",
+                         lines=10
+                     )
+
+                 with gr.Column():
+                     num_segments = gr.Number(
+                         label="Number of Song Segments (Auto-adjusted based on lyrics)",
+                         value=2,
+                         minimum=1,
+                         maximum=4,
+                         step=1,
+                         interactive=False
+                     )
+                     max_new_tokens = gr.Slider(
+                         label="Max New Tokens (Auto-adjusted based on lyrics)",
+                         minimum=500,
+                         maximum=32000,
+                         step=500,
+                         value=4000,
+                         interactive=False
+                     )
+                     with gr.Row():
+                         duration_info = gr.Label(label="Estimated Duration")
+                         sections_info = gr.Label(label="Section Information")
+                     submit_btn = gr.Button("Generate Music", variant="primary")
+                     music_out = gr.Audio(label="Generated Audio")
+
+             # Multilingual examples
+             gr.Examples(
+                 examples=[
+                     # English example
+                     [
+                         "female blues airy vocal bright vocal piano sad romantic guitar jazz",
+                         """[verse]
+ In the quiet of the evening, shadows start to fall
+ Whispers of the night wind echo through the hall
+ Lost within the silence, I hear your gentle voice
+ Guiding me back homeward, making my heart rejoice
+
+ [chorus]
+ Don't let this moment fade, hold me close tonight
+ With you here beside me, everything's alright
+ Can't imagine life alone, don't want to let you go
+ Stay with me forever, let our love just flow
+ """
+                     ],
+                     # Korean example
+                     [
+                         "K-pop bright energetic synth dance electronic",
+                         """[verse]
+ 빛나는 별들처럼 우리의 꿈이
+ 저 하늘을 수놓아 반짝이네
+ 함께라면 어디든 갈 수 있어
+
+ [chorus]
+ 달려가자 더 높이 더 멀리
+
+ """
+                     ]
+                 ],
+                 inputs=[genre_txt, lyrics_txt]
+             )
+
+         # System initialization
+         initialize_system()
+
+         def update_info(lyrics):
+             if not lyrics:
+                 return "No lyrics entered", "No sections detected"
+             params = calculate_generation_params(lyrics)
+             duration = params['estimated_duration']
+             sections = params['sections']
+             return (
+                 f"Estimated duration: {duration:.1f} seconds",
+                 f"Verses: {sections['verse']}, Chorus: {sections['chorus']} (Expected full length including chorus)"
+             )
+
+
+
+         # Event handlers
+         lyrics_txt.change(
+             fn=update_info,
+             inputs=[lyrics_txt],
+             outputs=[duration_info, sections_info]
+         )
+
+         submit_btn.click(
+             fn=infer,
+             inputs=[genre_txt, lyrics_txt, num_segments, max_new_tokens],
+             outputs=[music_out]
+         )
+
+     return demo
+
+ if __name__ == "__main__":
+     demo = main()
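+     # Queue up to 20 pending requests; they are served with at most 2 worker threads (max_threads=2)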
+     demo.queue(max_size=20).launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         share=True,
+         show_api=True,
+         show_error=True,
+         max_threads=2
+     )