Update app.py
app.py CHANGED
@@ -211,16 +211,22 @@ def initialize_system():
 
         from huggingface_hub import snapshot_download
 
-
-
-
-
-
-
-
-
-
-
+        # Download all required models
+        models_to_download = [
+            "m-a-p/xcodec_mini_infer",
+            "m-a-p/YuE-s1-7B-anneal-jp-kr-cot",
+            "m-a-p/YuE-s1-7B-anneal-en-cot",
+            "m-a-p/YuE-s1-7B-anneal-zh-cot",
+            "m-a-p/YuE-s2-1B-general"
+        ]
+
+        for model in models_to_download:
+            futures.append(executor.submit(
+                snapshot_download,
+                repo_id=model,
+                local_dir=f"./inference/models/{model.split('/')[-1]}",
+                resume_download=True
+            ))
 
         for future in futures:
             future.result()
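The executor and futures used in this hunk are created earlier in initialize_system() and are not shown in the diff. The snippet below is a minimal, self-contained sketch of how the parallel download presumably fits together; the ThreadPoolExecutor setup and the worker count are assumptions, not part of the commit, and resume_download is omitted because newer huggingface_hub versions resume interrupted downloads by default.

# Hedged sketch: the with-block and worker count are assumed, not taken from the commit.
from concurrent.futures import ThreadPoolExecutor
from huggingface_hub import snapshot_download

models_to_download = [
    "m-a-p/xcodec_mini_infer",
    "m-a-p/YuE-s1-7B-anneal-jp-kr-cot",
    "m-a-p/YuE-s1-7B-anneal-en-cot",
    "m-a-p/YuE-s1-7B-anneal-zh-cot",
    "m-a-p/YuE-s2-1B-general",
]

with ThreadPoolExecutor(max_workers=4) as executor:  # worker count is a guess
    futures = [
        executor.submit(
            snapshot_download,
            repo_id=model,
            local_dir=f"./inference/models/{model.split('/')[-1]}",
        )
        for model in models_to_download
    ]
    for future in futures:
        future.result()  # re-raises any download error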
@@ -307,11 +313,10 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
     os.makedirs(output_dir, exist_ok=True)
     empty_output_folder(output_dir)
 
-    # Fixed command - removed unsupported arguments
     command = [
         "python", "infer.py",
-        "--stage1_model", model_path,
-        "--stage2_model", "
+        "--stage1_model", f"./models/{model_path.split('/')[-1]}",
+        "--stage2_model", "./models/YuE-s2-1B-general",
         "--genre_txt", genre_txt_path,
         "--lyrics_txt", lyrics_txt_path,
         "--run_n_segments", str(actual_num_segments),
@@ -319,7 +324,10 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
         "--output_dir", output_dir,
         "--cuda_idx", "0",
         "--max_new_tokens", str(actual_max_tokens),
-        "--disable_offload_model"
+        "--disable_offload_model",
+        "--basic_model_config", "./xcodec_mini_infer/config.json",
+        "--vocal_decoder_path", "./xcodec_mini_infer/vocal_decoder.pth",
+        "--inst_decoder_path", "./xcodec_mini_infer/inst_decoder.pth"
     ]
 
     env = os.environ.copy()
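The hunks above only assemble the command list and (in the next hunk) the environment; the actual invocation of infer.py is outside the changed lines. The following is a hedged sketch of how those pieces are typically wired up, with stand-in values so it runs on its own; the working directory and error handling are assumptions, not taken from app.py.

import os
import subprocess

# Stand-ins: in app.py, command and env are built as shown in the hunks above and below.
command = ["python", "infer.py", "--stage1_model", "./models/YuE-s1-7B-anneal-en-cot"]
env = os.environ.copy()

result = subprocess.run(
    command,
    env=env,
    cwd="./inference",   # assumption: infer.py and xcodec_mini_infer/ live under ./inference
    capture_output=True,
    text=True,
)
if result.returncode != 0:
    raise RuntimeError(f"infer.py failed:\n{result.stderr}")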
@@ -330,9 +338,14 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
         "PATH": f"/usr/local/cuda/bin:{env.get('PATH', '')}",
         "LD_LIBRARY_PATH": f"/usr/local/cuda/lib64:{env.get('LD_LIBRARY_PATH', '')}",
         "PYTORCH_CUDA_ALLOC_CONF": "max_split_size_mb:512",
-        "CUDA_LAUNCH_BLOCKING": "0"
+        "CUDA_LAUNCH_BLOCKING": "0",
+        "TRANSFORMERS_CACHE": "./models/cache",
+        "HF_HOME": "./models/cache"
     })
 
+
+
+
     # Handle transformers cache migration
     try:
         from transformers.utils import move_cache
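The final hunk ends inside the cache-migration guard, so the rest of that try block is not shown. A plausible completion is sketched below; the move_cache() call and the except clause are assumptions based on the common pattern, not lines from this commit.

try:
    from transformers.utils import move_cache
    move_cache()  # migrate an old-style transformers cache to the current hub layout
except Exception:
    pass  # best-effort: skip if migration is unavailable or already done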