Spaces: Building on L40S
Update app.py
app.py CHANGED
@@ -325,34 +325,58 @@ def initialize_system():
     optimize_gpu_settings()
 
     try:
-        # Save the current working directory
-        original_dir = os.getcwd()
-
         # Create the basic directory structure
-        os.
-        os.makedirs(
-        os.makedirs("
-        os.makedirs("./inference/xcodec_mini_infer", exist_ok=True)
-        os.makedirs("./inference/xcodec_mini_infer/checkpoints", exist_ok=True)
+        base_dir = os.path.abspath("./inference")
+        os.makedirs(base_dir, exist_ok=True)
+        os.makedirs(os.path.join(base_dir, "models"), exist_ok=True)
 
-        #
-
-        logging.info(f"Working directory changed to: {os.getcwd()}")
+        # Download xcodec_mini_infer
+        from huggingface_hub import snapshot_download
 
-
-
-        os.environ["HF_HOME"] = cache_dir
-        os.environ["TRANSFORMERS_CACHE"] = cache_dir
-        os.environ["HF_HUB_CACHE"] = cache_dir
+        xcodec_path = os.path.join(base_dir, "xcodec_mini_infer")
+        os.makedirs(xcodec_path, exist_ok=True)
 
-        # Model
-
+        # Download the xcodec_mini_infer model
+        snapshot_download(
+            repo_id="m-a-p/xcodec_mini_infer",
+            local_dir=xcodec_path,
+            resume_download=True
+        )
 
-
+        # Download the YuE models
+        models = [
+            "m-a-p/YuE-s1-7B-anneal-jp-kr-cot",
+            "m-a-p/YuE-s1-7B-anneal-en-cot",
+            "m-a-p/YuE-s1-7B-anneal-zh-cot",
+            "m-a-p/YuE-s2-1B-general"
+        ]
 
+        for model in models:
+            model_name = model.split('/')[-1]
+            model_path = os.path.join(base_dir, "models", model_name)
+            snapshot_download(
+                repo_id=model,
+                local_dir=model_path,
+                resume_download=True
+            )
+
+        # Change the working directory
+        os.chdir(base_dir)
+        logging.info(f"Working directory changed to: {os.getcwd()}")
+
+        # Verify that the required files exist
+        required_files = [
+            os.path.join("xcodec_mini_infer", "config.json"),
+            os.path.join("xcodec_mini_infer", "vocal_decoder.pth"),
+            os.path.join("xcodec_mini_infer", "inst_decoder.pth")
+        ]
+
+        for file_path in required_files:
+            if not os.path.exists(file_path):
+                raise FileNotFoundError(f"Required file not found: {file_path}")
+
     except Exception as e:
-        logging.error(f"
-        os.chdir(original_dir)
+        logging.error(f"Directory error: {e}")
         raise
 
 @lru_cache(maxsize=100)
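Two things are worth flagging about the initialize_system() hunk above. First, the old error path restored the saved working directory with os.chdir(original_dir); the new code drops both the save and the rollback, so a failure after os.chdir(base_dir) leaves the process inside ./inference. Second, the downloads now run on every cold start and cover three 7B stage-1 checkpoints, the 1B stage-2 model, and xcodec_mini_infer. Below is a minimal sketch of how the same snapshot_download call could be wrapped to skip repos that already exist locally; the helper name ensure_model and its non-empty-directory check are assumptions for illustration, not code from this Space.

import os
import logging
from huggingface_hub import snapshot_download

def ensure_model(repo_id: str, local_dir: str) -> str:
    """Download repo_id into local_dir unless it already looks populated.

    Hypothetical helper, not part of app.py. The check is a crude heuristic
    (non-empty directory), not an integrity verification.
    """
    os.makedirs(local_dir, exist_ok=True)
    if os.listdir(local_dir):
        logging.info(f"Skipping download, {local_dir} already populated")
        return local_dir
    # resume_download lets an interrupted transfer continue on the next start
    snapshot_download(repo_id=repo_id, local_dir=local_dir, resume_download=True)
    return local_dir

# Usage mirroring the paths in the hunk above:
# base_dir = os.path.abspath("./inference")
# ensure_model("m-a-p/xcodec_mini_infer", os.path.join(base_dir, "xcodec_mini_infer"))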
@@ -435,24 +459,19 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
     os.makedirs(output_dir, exist_ok=True)
     empty_output_folder(output_dir)
 
-
     command = [
         "python", "infer.py",
-        "--stage1_model",
-        "--stage2_model", "
-        "--genre_txt",
-        "--lyrics_txt",
+        "--stage1_model", model_path,  # use the local model path
+        "--stage2_model", "m-a-p/YuE-s2-1B-general",
+        "--genre_txt", genre_txt_path,
+        "--lyrics_txt", lyrics_txt_path,
         "--run_n_segments", str(actual_num_segments),
         "--stage2_batch_size", "16",
-        "--output_dir",
+        "--output_dir", output_dir,
         "--cuda_idx", "0",
         "--max_new_tokens", str(actual_max_tokens),
-        "--disable_offload_model"
-        "--basic_model_config", os.path.abspath("./xcodec_mini_infer/config.json"),
-        "--vocal_decoder_path", os.path.abspath("./xcodec_mini_infer/vocal_decoder.pth"),
-        "--inst_decoder_path", os.path.abspath("./xcodec_mini_infer/inst_decoder.pth")
+        "--disable_offload_model"
     ]
-
 
 
     env = os.environ.copy()
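Two notes on this infer() hunk. In the old command list, "--disable_offload_model" had no trailing comma, so Python's implicit string-literal concatenation fused it with "--basic_model_config" into one malformed argument; the new version makes "--disable_offload_model" the last element and drops the xcodec config and decoder-path flags, presumably relying on infer.py's defaults for those paths. The diff ends right after env = os.environ.copy(), so the actual launch is not shown; a minimal sketch of how a command list like this is typically run, assuming command and env from the code above, could look like:

import logging
import subprocess

# Assumed launch pattern; `command` and `env` are built in the code above.
result = subprocess.run(
    command,
    env=env,              # pass along the copied environment
    capture_output=True,  # keep stdout/stderr so failures can be logged
    text=True,
)
if result.returncode != 0:
    logging.error(f"infer.py failed:\n{result.stderr}")
    raise RuntimeError("YuE inference subprocess failed")

Using check=True instead of inspecting returncode would also work; the explicit branch just makes it easier to log stderr before raising.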