ginipick commited on
Commit
299f10b
·
verified ·
1 Parent(s): 3693724

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +89 -21
app.py CHANGED
@@ -201,41 +201,104 @@ def install_flash_attn():
201
  logging.warning(f"Failed to install flash-attn: {e}")
202
  return False
203
 
204
- def initialize_system():
205
- optimize_gpu_settings()
 
 
 
 
 
206
 
207
- with ThreadPoolExecutor(max_workers=4) as executor:
208
- futures = []
209
-
210
- futures.append(executor.submit(install_flash_attn))
211
-
 
 
212
  from huggingface_hub import snapshot_download
213
 
214
- # 필요한 모든 모델 다운로드
215
- models_to_download = [
216
- "m-a-p/xcodec_mini_infer",
 
 
 
 
 
 
217
  "m-a-p/YuE-s1-7B-anneal-jp-kr-cot",
218
  "m-a-p/YuE-s1-7B-anneal-en-cot",
219
  "m-a-p/YuE-s1-7B-anneal-zh-cot",
220
  "m-a-p/YuE-s2-1B-general"
221
  ]
222
 
223
- for model in models_to_download:
224
- futures.append(executor.submit(
225
- snapshot_download,
226
  repo_id=model,
227
- local_dir=f"./inference/models/{model.split('/')[-1]}",
228
  resume_download=True
229
- ))
230
-
231
- for future in futures:
232
- future.result()
233
-
 
 
 
 
234
  try:
 
 
 
 
 
 
 
235
  os.chdir("./inference")
236
  logging.info(f"Working directory changed to: {os.getcwd()}")
237
- except FileNotFoundError as e:
238
- logging.error(f"Directory error: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
  raise
240
 
241
  @lru_cache(maxsize=100)
@@ -279,11 +342,16 @@ def get_audio_duration(file_path):
279
  logging.error(f"Failed to get audio duration: {e}")
280
  return None
281
 
 
 
282
  def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
 
 
283
  genre_txt_path = None
284
  lyrics_txt_path = None
285
 
286
  try:
 
287
  model_path, config, params = optimize_model_selection(lyrics_txt_content, genre_txt_content)
288
  logging.info(f"Selected model: {model_path}")
289
  logging.info(f"Lyrics analysis: {params}")
 
201
  logging.warning(f"Failed to install flash-attn: {e}")
202
  return False
203
 
204
+
205
+ def check_model_files():
206
+ required_files = [
207
+ "./xcodec_mini_infer/config.json",
208
+ "./xcodec_mini_infer/vocal_decoder.pth",
209
+ "./xcodec_mini_infer/inst_decoder.pth"
210
+ ]
211
 
212
+ missing_files = [f for f in required_files if not os.path.exists(f)]
213
+ if missing_files:
214
+ logging.warning(f"Missing required files: {missing_files}")
215
+ download_missing_files()
216
+
217
+ def download_missing_files():
218
+ try:
219
  from huggingface_hub import snapshot_download
220
 
221
+ # xcodec_mini_infer 모델 다운로드
222
+ snapshot_download(
223
+ repo_id="m-a-p/xcodec_mini_infer",
224
+ local_dir="./xcodec_mini_infer",
225
+ resume_download=True
226
+ )
227
+
228
+ # YuE 모델들 다운로드
229
+ models = [
230
  "m-a-p/YuE-s1-7B-anneal-jp-kr-cot",
231
  "m-a-p/YuE-s1-7B-anneal-en-cot",
232
  "m-a-p/YuE-s1-7B-anneal-zh-cot",
233
  "m-a-p/YuE-s2-1B-general"
234
  ]
235
 
236
+ for model in models:
237
+ model_name = model.split('/')[-1]
238
+ snapshot_download(
239
  repo_id=model,
240
+ local_dir=f"./models/{model_name}",
241
  resume_download=True
242
+ )
243
+
244
+ logging.info("All required models downloaded successfully")
245
+ except Exception as e:
246
+ logging.error(f"Error downloading models: {e}")
247
+ raise
248
+ def initialize_system():
249
+ optimize_gpu_settings()
250
+
251
  try:
252
+ # 기본 디렉토리 구조 생성
253
+ os.makedirs("./inference", exist_ok=True)
254
+ os.makedirs("./inference/models", exist_ok=True)
255
+ os.makedirs("./inference/models/cache", exist_ok=True)
256
+ os.makedirs("./inference/xcodec_mini_infer", exist_ok=True)
257
+
258
+ # 작업 디렉토리 변경
259
  os.chdir("./inference")
260
  logging.info(f"Working directory changed to: {os.getcwd()}")
261
+
262
+ # ๋ชจ๋ธ ํŒŒ์ผ ์ฒดํฌ ๋ฐ ๋‹ค์šด๋กœ๋“œ
263
+ check_model_files()
264
+
265
+ with ThreadPoolExecutor(max_workers=4) as executor:
266
+ futures = []
267
+
268
+ # Flash Attention 설치
269
+ futures.append(executor.submit(install_flash_attn))
270
+
271
+ from huggingface_hub import snapshot_download
272
+
273
+ # 필요한 모든 모델 다운로드
274
+ models_to_download = [
275
+ "m-a-p/xcodec_mini_infer",
276
+ "m-a-p/YuE-s1-7B-anneal-jp-kr-cot",
277
+ "m-a-p/YuE-s1-7B-anneal-en-cot",
278
+ "m-a-p/YuE-s1-7B-anneal-zh-cot",
279
+ "m-a-p/YuE-s2-1B-general"
280
+ ]
281
+
282
+ for model in models_to_download:
283
+ model_name = model.split('/')[-1]
284
+ model_dir = f"./models/{model_name}"
285
+
286
+ if not os.path.exists(model_dir):
287
+ futures.append(executor.submit(
288
+ snapshot_download,
289
+ repo_id=model,
290
+ local_dir=model_dir,
291
+ resume_download=True
292
+ ))
293
+
294
+ # 모든 다운로드 완료 대기
295
+ for future in futures:
296
+ future.result()
297
+
298
+ logging.info("System initialization completed successfully")
299
+
300
+ except Exception as e:
301
+ logging.error(f"Initialization error: {e}")
302
  raise
303
 
304
  @lru_cache(maxsize=100)
 
342
  logging.error(f"Failed to get audio duration: {e}")
343
  return None
344
 
345
+
346
+
347
  def infer(genre_txt_content, lyrics_txt_content, num_segments, max_new_tokens):
348
+ check_model_files() # 필요한 파일 체크 및 다운로드
349
+
350
  genre_txt_path = None
351
  lyrics_txt_path = None
352
 
353
  try:
354
+
355
  model_path, config, params = optimize_model_selection(lyrics_txt_content, genre_txt_content)
356
  logging.info(f"Selected model: {model_path}")
357
  logging.info(f"Lyrics analysis: {params}")