Update app.py
app.py CHANGED
@@ -211,30 +211,55 @@ def initialize_system():
     optimize_gpu_settings()

     try:
-        #
-        os.
-        os.
-        os.
+        # Set up absolute paths
+        app_dir = os.path.abspath(os.path.dirname(__file__))
+        inference_dir = os.path.join(app_dir, "inference")
+        models_dir = os.path.join(inference_dir, "models")
+
+        # Create the base directory structure
+        os.makedirs(inference_dir, exist_ok=True)
+        os.makedirs(models_dir, exist_ok=True)
+        os.makedirs(os.path.join(inference_dir, "xcodec_mini_infer"), exist_ok=True)

         from huggingface_hub import snapshot_download, hf_hub_download

+        # Download the models module
+        try:
+            models_files = [
+                "soundstream_hubert_new.py",
+                "__init__.py"
+            ]
+
+            for file_name in models_files:
+                file_path = hf_hub_download(
+                    repo_id="m-a-p/xcodec_mini_infer",
+                    filename=f"models/{file_name}",
+                    local_dir=inference_dir,
+                    force_download=True
+                )
+                target_path = os.path.join(models_dir, file_name)
+                os.makedirs(os.path.dirname(target_path), exist_ok=True)
+                shutil.copy2(file_path, target_path)
+                logging.info(f"Copied {file_name} to: {target_path}")
+        except Exception as e:
+            logging.error(f"Error downloading models files: {e}")
+            raise
+
         # Download infer.py
         try:
-            infer_script_download = hf_hub_download(
+            infer_script = hf_hub_download(
                 repo_id="m-a-p/xcodec_mini_infer",
                 filename="infer.py",
-                local_dir=
+                local_dir=inference_dir,
                 force_download=True
             )
-
-            shutil.copy2(infer_script_download, INFER_SCRIPT)
-            logging.info(f"infer.py available at: {INFER_SCRIPT}")
+            logging.info(f"Downloaded infer.py to: {infer_script}")
         except Exception as e:
-            logging.error(f"
+            logging.error(f"Error downloading infer.py: {e}")
             raise

         # Download the xcodec_mini_infer model
-        xcodec_path = os.path.join(
+        xcodec_path = os.path.join(inference_dir, "xcodec_mini_infer")
         snapshot_download(
             repo_id="m-a-p/xcodec_mini_infer",
             local_dir=xcodec_path,
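A note on the two download patterns in this hunk: hf_hub_download fetches a single file and, when local_dir is set, materializes it under that directory with its repo-relative subpath intact (filename="models/x.py" lands at inference_dir/models/x.py) before returning the resulting path. Since models_dir here is that same inference_dir/models location, a verbatim shutil.copy2 can raise shutil.SameFileError; below is a minimal sketch of the pattern with that guarded, where fetch_module_file is a hypothetical helper name, not a function from app.py:

import os
import shutil
from huggingface_hub import hf_hub_download

def fetch_module_file(file_name, inference_dir, models_dir):
    # Returns the absolute path of the downloaded file; with local_dir set,
    # "models/<file>" is placed under inference_dir/models/.
    src = hf_hub_download(
        repo_id="m-a-p/xcodec_mini_infer",
        filename=f"models/{file_name}",
        local_dir=inference_dir,
    )
    dst = os.path.join(models_dir, file_name)
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    if os.path.abspath(src) != os.path.abspath(dst):
        shutil.copy2(src, dst)  # copy only when the target is a different path
    return dst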
@@ -251,7 +276,7 @@ def initialize_system():

         for model in models:
             model_name = model.split('/')[-1]
-            model_path = os.path.join(
+            model_path = os.path.join(inference_dir, "models", model_name)
             snapshot_download(
                 repo_id=model,
                 local_dir=model_path,
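Unlike hf_hub_download, snapshot_download mirrors an entire repository into local_dir, so the loop above gives each model its own folder under inference/models. A hedged sketch of the call shape, using an illustrative repo id rather than one taken from the (not shown) models list:

from huggingface_hub import snapshot_download

# Downloads every file in the repo (config, weights, tokenizer files)
# into local_dir and returns that directory's path.
model_dir = snapshot_download(
    repo_id="some-org/some-model",  # illustrative; the real ids come from `models`
    local_dir="inference/models/some-model",
)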
@@ -259,12 +284,19 @@ def initialize_system():
             )

         # Change the working directory
-        os.chdir(
+        os.chdir(inference_dir)
         logging.info(f"Working directory changed to: {os.getcwd()}")

+        # Set PYTHONPATH
+        if inference_dir not in sys.path:
+            sys.path.insert(0, inference_dir)
+            logging.info(f"Added to PYTHONPATH: {inference_dir}")
+
         # Verify required files exist
         required_files = [
-
+            os.path.join(inference_dir, "infer.py"),
+            os.path.join(models_dir, "soundstream_hubert_new.py"),
+            os.path.join(models_dir, "__init__.py"),
             os.path.join(xcodec_path, "config.json"),
             os.path.join(xcodec_path, "vocal_decoder.pth"),
             os.path.join(xcodec_path, "inst_decoder.pth")
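Two details in this hunk are easy to miss: sys.path.insert only affects the current interpreter, which is why the subprocess environment later also exports PYTHONPATH; and required_files feeds an existence check further down. A minimal sketch of such a check, assuming the required_files list built above (aggregating misses before raising is illustrative, not necessarily the app's exact code):

import os

# Report every missing file at once rather than failing on the first.
missing = [p for p in required_files if not os.path.exists(p)]
if missing:
    raise FileNotFoundError(f"Missing required files: {missing}")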
@@ -284,7 +316,6 @@ def initialize_system():
         raise


-
 @lru_cache(maxsize=100)
 def get_cached_file_path(content_hash, prefix):
     return create_temp_file(content_hash, prefix)
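On the decorator above: lru_cache memoizes on the argument tuple, so repeated calls with the same (content_hash, prefix) pair return the same temp-file path without re-creating the file, and at maxsize=100 the least recently used entries are evicted. An illustrative use, assuming create_temp_file is the helper defined elsewhere in app.py:

path_a = get_cached_file_path("abc123", "genre")
path_b = get_cached_file_path("abc123", "genre")
assert path_a == path_b  # second call is served from the cache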
@@ -366,6 +397,9 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):


     python_executable = sys.executable or "python"  # fallback to "python" if sys.executable is not available
+
+
+

     command = [
         python_executable,
@@ -382,7 +416,6 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
         "--disable_offload_model"
     ]

-
     env = os.environ.copy()
     if torch.cuda.is_available():
         env.update({
@@ -391,7 +424,8 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
             "PATH": f"/usr/local/cuda/bin:{env.get('PATH', '')}",
             "LD_LIBRARY_PATH": f"/usr/local/cuda/lib64:{env.get('LD_LIBRARY_PATH', '')}",
             "PYTORCH_CUDA_ALLOC_CONF": "max_split_size_mb:512",
-            "CUDA_LAUNCH_BLOCKING": "0"
+            "CUDA_LAUNCH_BLOCKING": "0",
+            "PYTHONPATH": f"{os.getcwd()}:{env.get('PYTHONPATH', '')}"
         })

     # Handle transformers cache migration
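The env dict assembled in this hunk is what carries the earlier sys.path change across the process boundary: a child process does not inherit in-process sys.path edits, only environment variables, so PYTHONPATH is seeded from os.getcwd() (the inference directory after the chdir above). A minimal sketch of how such an env is typically consumed; the subprocess.run call is an assumption about the surrounding code, not a line from this diff:

import os
import subprocess
import sys

env = os.environ.copy()
env["PYTHONPATH"] = f"{os.getcwd()}:{env.get('PYTHONPATH', '')}"

result = subprocess.run(
    [sys.executable or "python", "infer.py", "--disable_offload_model"],
    env=env,  # child sees PYTHONPATH and the CUDA settings
    capture_output=True,
    text=True,
)
if result.returncode != 0:
    raise RuntimeError(f"infer.py failed: {result.stderr[-500:]}")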