Spaces:
Building
on
L40S
Update app.py
Browse files
app.py
CHANGED
@@ -7,8 +7,10 @@ import torch
|
|
7 |
import logging
|
8 |
import numpy as np
|
9 |
import re
|
|
|
10 |
from concurrent.futures import ThreadPoolExecutor
|
11 |
from functools import lru_cache
|
|
|
12 |
|
13 |
# 로깅 설정
|
14 |
logging.basicConfig(
|
@@ -324,7 +326,6 @@ def get_audio_duration(file_path):
|
|
324 |
logging.error(f"Failed to get audio duration: {e}")
|
325 |
return None
|
326 |
|
327 |
-
|
328 |
def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
|
329 |
genre_txt_path = None
|
330 |
lyrics_txt_path = None
|
@@ -336,7 +337,6 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
|
|
336 |
model_path, config, params = optimize_model_selection(lyrics_txt_content, genre_txt_content)
|
337 |
logging.info(f"Selected model: {model_path}")
|
338 |
logging.info(f"Lyrics analysis: {params}")
|
339 |
-
|
340 |
|
341 |
|
342 |
has_chorus = params['sections']['chorus'] > 0
|
@@ -365,9 +365,10 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
|
|
365 |
empty_output_folder(output_dir)
|
366 |
|
367 |
|
|
|
368 |
|
369 |
command = [
|
370 |
-
|
371 |
INFER_SCRIPT,
|
372 |
"--stage1_model", model_path,
|
373 |
"--stage2_model", "m-a-p/YuE-s2-1B-general",
|
@@ -380,7 +381,6 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
|
|
380 |
"--max_new_tokens", str(actual_max_tokens),
|
381 |
"--disable_offload_model"
|
382 |
]
|
383 |
-
|
384 |
|
385 |
|
386 |
env = os.environ.copy()
|
|
|
7 |
import logging
|
8 |
import numpy as np
|
9 |
import re
|
10 |
+
import sys # sys 모듈 추가
|
11 |
from concurrent.futures import ThreadPoolExecutor
|
12 |
from functools import lru_cache
|
13 |
+
from datetime import datetime
|
14 |
|
15 |
# 로깅 설정
|
16 |
logging.basicConfig(
|
|
|
326 |
logging.error(f"Failed to get audio duration: {e}")
|
327 |
return None
|
328 |
|
|
|
329 |
def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
|
330 |
genre_txt_path = None
|
331 |
lyrics_txt_path = None
|
|
|
337 |
model_path, config, params = optimize_model_selection(lyrics_txt_content, genre_txt_content)
|
338 |
logging.info(f"Selected model: {model_path}")
|
339 |
logging.info(f"Lyrics analysis: {params}")
|
|
|
340 |
|
341 |
|
342 |
has_chorus = params['sections']['chorus'] > 0
|
|
|
365 |
empty_output_folder(output_dir)
|
366 |
|
367 |
|
368 |
+
python_executable = sys.executable or "python" # fallback to "python" if sys.executable is not available
|
369 |
|
370 |
command = [
|
371 |
+
python_executable,
|
372 |
INFER_SCRIPT,
|
373 |
"--stage1_model", model_path,
|
374 |
"--stage2_model", "m-a-p/YuE-s2-1B-general",
|
|
|
381 |
"--max_new_tokens", str(actual_max_tokens),
|
382 |
"--disable_offload_model"
|
383 |
]
|
|
|
384 |
|
385 |
|
386 |
env = os.environ.copy()
|