ginipick committed on
Commit
f08699a
·
verified ·
1 Parent(s): 299f10b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -46
app.py CHANGED
@@ -203,27 +203,35 @@ def install_flash_attn():
203
 
204
 
205
  def check_model_files():
 
206
  required_files = [
207
- "./xcodec_mini_infer/config.json",
208
- "./xcodec_mini_infer/vocal_decoder.pth",
209
- "./xcodec_mini_infer/inst_decoder.pth"
210
  ]
211
 
212
  missing_files = [f for f in required_files if not os.path.exists(f)]
213
  if missing_files:
214
  logging.warning(f"Missing required files: {missing_files}")
215
  download_missing_files()
 
 
 
 
 
216
 
217
  def download_missing_files():
218
  try:
219
  from huggingface_hub import snapshot_download
220
 
221
  # xcodec_mini_infer ๋ชจ๋ธ ๋‹ค์šด๋กœ๋“œ
222
- snapshot_download(
223
  repo_id="m-a-p/xcodec_mini_infer",
224
  local_dir="./xcodec_mini_infer",
225
- resume_download=True
 
226
  )
 
227
 
228
  # YuE ๋ชจ๋ธ๋“ค ๋‹ค์šด๋กœ๋“œ
229
  models = [
@@ -235,70 +243,57 @@ def download_missing_files():
235
 
236
  for model in models:
237
  model_name = model.split('/')[-1]
238
- snapshot_download(
239
  repo_id=model,
240
  local_dir=f"./models/{model_name}",
241
- resume_download=True
 
242
  )
 
243
 
244
  logging.info("All required models downloaded successfully")
245
  except Exception as e:
246
  logging.error(f"Error downloading models: {e}")
247
  raise
 
248
def initialize_system():
    """Initialize GPU settings, directory layout, flash-attn, and model downloads.

    Side effects: on success the working directory is left at ./inference
    (later relative model paths depend on this); on failure the original
    working directory is restored before the exception propagates.

    Raises:
        Exception: re-raises any setup error after logging it.
    """
    optimize_gpu_settings()

    # Bug fix: capture the starting directory before chdir so a failure
    # mid-initialization does not strand the process inside ./inference.
    original_dir = os.getcwd()

    try:
        # Create the base directory structure used by inference.
        os.makedirs("./inference", exist_ok=True)
        os.makedirs("./inference/models", exist_ok=True)
        os.makedirs("./inference/models/cache", exist_ok=True)
        os.makedirs("./inference/xcodec_mini_infer", exist_ok=True)

        # All later relative paths assume ./inference as the cwd.
        os.chdir("./inference")
        logging.info(f"Working directory changed to: {os.getcwd()}")

        # Check required decoder files and download them if missing.
        check_model_files()

        with ThreadPoolExecutor(max_workers=4) as executor:
            futures = []

            # Build/install flash-attn in the background while models download.
            futures.append(executor.submit(install_flash_attn))

            from huggingface_hub import snapshot_download

            # Every model repo the app needs available on disk.
            models_to_download = [
                "m-a-p/xcodec_mini_infer",
                "m-a-p/YuE-s1-7B-anneal-jp-kr-cot",
                "m-a-p/YuE-s1-7B-anneal-en-cot",
                "m-a-p/YuE-s1-7B-anneal-zh-cot",
                "m-a-p/YuE-s2-1B-general",
            ]

            for model in models_to_download:
                model_dir = f"./models/{model.split('/')[-1]}"
                # Skip repos already present to avoid re-downloading.
                # Note: the deprecated resume_download kwarg was dropped;
                # recent huggingface_hub resumes interrupted downloads by
                # default.
                if not os.path.exists(model_dir):
                    futures.append(executor.submit(
                        snapshot_download,
                        repo_id=model,
                        local_dir=model_dir,
                    ))

            # Wait for every task; .result() re-raises worker exceptions.
            for future in futures:
                future.result()

        logging.info("System initialization completed successfully")

    except Exception as e:
        logging.error(f"Initialization error: {e}")
        os.chdir(original_dir)  # restore the caller's working directory
        raise
304
  @lru_cache(maxsize=100)
@@ -381,22 +376,25 @@ def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
381
  os.makedirs(output_dir, exist_ok=True)
382
  empty_output_folder(output_dir)
383
 
 
384
  command = [
385
  "python", "infer.py",
386
  "--stage1_model", f"./models/{model_path.split('/')[-1]}",
387
  "--stage2_model", "./models/YuE-s2-1B-general",
388
- "--genre_txt", genre_txt_path,
389
- "--lyrics_txt", lyrics_txt_path,
390
  "--run_n_segments", str(actual_num_segments),
391
  "--stage2_batch_size", "16",
392
- "--output_dir", output_dir,
393
  "--cuda_idx", "0",
394
  "--max_new_tokens", str(actual_max_tokens),
395
  "--disable_offload_model",
396
- "--basic_model_config", "./xcodec_mini_infer/config.json",
397
- "--vocal_decoder_path", "./xcodec_mini_infer/vocal_decoder.pth",
398
- "--inst_decoder_path", "./xcodec_mini_infer/inst_decoder.pth"
399
  ]
 
 
400
 
401
  env = os.environ.copy()
402
  if torch.cuda.is_available():
 
203
 
204
 
205
def check_model_files():
    """Verify the xcodec_mini_infer assets exist under the cwd, fetching on demand.

    Builds absolute paths for the config and the vocal/instrument decoder
    weights, downloads them when any is absent, then re-validates.

    Raises:
        FileNotFoundError: when a required file is still absent after the
            download attempt.
    """
    xcodec_dir = os.path.join(os.getcwd(), "xcodec_mini_infer")
    required_files = [
        os.path.join(xcodec_dir, name)
        for name in ("config.json", "vocal_decoder.pth", "inst_decoder.pth")
    ]

    missing = [path for path in required_files if not os.path.exists(path)]
    if missing:
        logging.warning(f"Missing required files: {missing}")
        download_missing_files()

    # Re-validate so a failed or partial download is reported immediately.
    for path in required_files:
        if not os.path.exists(path):
            raise FileNotFoundError(f"Required file still missing after download: {path}")
223
  def download_missing_files():
224
  try:
225
  from huggingface_hub import snapshot_download
226
 
227
  # xcodec_mini_infer ๋ชจ๋ธ ๋‹ค์šด๋กœ๋“œ
228
+ xcodec_path = snapshot_download(
229
  repo_id="m-a-p/xcodec_mini_infer",
230
  local_dir="./xcodec_mini_infer",
231
+ resume_download=True,
232
+ force_download=True
233
  )
234
+ logging.info(f"Downloaded xcodec_mini_infer to: {xcodec_path}")
235
 
236
  # YuE ๋ชจ๋ธ๋“ค ๋‹ค์šด๋กœ๋“œ
237
  models = [
 
243
 
244
  for model in models:
245
  model_name = model.split('/')[-1]
246
+ model_path = snapshot_download(
247
  repo_id=model,
248
  local_dir=f"./models/{model_name}",
249
+ resume_download=True,
250
+ force_download=True
251
  )
252
+ logging.info(f"Downloaded {model_name} to: {model_path}")
253
 
254
  logging.info("All required models downloaded successfully")
255
  except Exception as e:
256
  logging.error(f"Error downloading models: {e}")
257
  raise
258
+
259
def initialize_system():
    """Set up GPU state, directory layout, HF caches, and model files for inference.

    Side effects: on success the process working directory is left at
    ./inference (infer.py and the relative model paths depend on it); on
    failure the original working directory is restored before re-raising.

    Raises:
        Exception: re-raises any setup error after logging it.
    """
    optimize_gpu_settings()

    # Bug fix: capture the starting directory BEFORE the try block, so the
    # except handler can never hit a NameError while restoring it.
    original_dir = os.getcwd()

    try:
        # Create the base directory structure for inference.
        os.makedirs("./inference", exist_ok=True)
        os.makedirs("./inference/models", exist_ok=True)
        os.makedirs("./inference/models/cache", exist_ok=True)
        os.makedirs("./inference/xcodec_mini_infer", exist_ok=True)

        # Move into the inference directory; later relative paths assume it.
        os.chdir("./inference")
        logging.info(f"Working directory changed to: {os.getcwd()}")

        # Point the Hugging Face caches at a local directory.
        # NOTE(review): TRANSFORMERS_CACHE is deprecated in recent
        # transformers releases; HF_HOME is the supported variable. Both are
        # set to stay compatible with older library versions.
        os.environ["HF_HOME"] = os.path.abspath("./models/cache")
        os.environ["TRANSFORMERS_CACHE"] = os.path.abspath("./models/cache")

        # Check required model files and download any that are missing.
        check_model_files()

        # Report the compute device (the unused `device` local was removed;
        # nothing downstream in this function consumed it).
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            logging.info(f"Using GPU device: {torch.cuda.get_device_name(0)}")
        else:
            logging.warning("GPU not available, using CPU")

        logging.info("System initialization completed successfully")

    except Exception as e:
        logging.error(f"Initialization error: {e}")
        os.chdir(original_dir)  # restore cwd on failure
        raise
299
  @lru_cache(maxsize=100)
 
376
  os.makedirs(output_dir, exist_ok=True)
377
  empty_output_folder(output_dir)
378
 
379
+
380
  command = [
381
  "python", "infer.py",
382
  "--stage1_model", f"./models/{model_path.split('/')[-1]}",
383
  "--stage2_model", "./models/YuE-s2-1B-general",
384
+ "--genre_txt", os.path.abspath(genre_txt_path),
385
+ "--lyrics_txt", os.path.abspath(lyrics_txt_path),
386
  "--run_n_segments", str(actual_num_segments),
387
  "--stage2_batch_size", "16",
388
+ "--output_dir", os.path.abspath(output_dir),
389
  "--cuda_idx", "0",
390
  "--max_new_tokens", str(actual_max_tokens),
391
  "--disable_offload_model",
392
+ "--basic_model_config", os.path.abspath("./xcodec_mini_infer/config.json"),
393
+ "--vocal_decoder_path", os.path.abspath("./xcodec_mini_infer/vocal_decoder.pth"),
394
+ "--inst_decoder_path", os.path.abspath("./xcodec_mini_infer/inst_decoder.pth")
395
  ]
396
+
397
+
398
 
399
  env = os.environ.copy()
400
  if torch.cuda.is_available():