aiqtech committed on
Commit
0769ffa
·
verified ·
1 Parent(s): bb8f0a5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -33
app.py CHANGED
@@ -103,11 +103,6 @@ os.environ['PYTORCH_NO_CUDA_MEMORY_CACHING'] = '1'
103
  # CUDA 초기화 방지
104
  torch.set_grad_enabled(False)
105
 
106
-
107
-
108
- # Gradio ์ธํ„ฐํŽ˜์ด์Šค์— ์ฃผ๊ธฐ์  ์ •๋ฆฌ ์ถ”๊ฐ€
109
- demo.load(periodic_cleanup, every=5) # 5초마다 정리
110
-
111
  # Hugging Face ํ† ํฐ ์„ค์ •
112
  HF_TOKEN = os.getenv("HF_TOKEN")
113
  if HF_TOKEN is None:
@@ -246,12 +241,12 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
246
  formats=["gaussian", "mesh"],
247
  preprocess_image=False,
248
  sparse_structure_sampler_params={
249
- "steps": min(ss_sampling_steps, 8), # 스텝 수 제한
250
  "cfg_strength": ss_guidance_strength,
251
- "batch_size": 1 # 배치 크기 명시적 제한
252
  },
253
  slat_sampler_params={
254
- "steps": min(slat_sampling_steps, 8), # 스텝 수 제한
255
  "cfg_strength": slat_guidance_strength,
256
  "batch_size": 1
257
  },
@@ -263,8 +258,8 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
263
  # ๋น„๋””์˜ค ๋ Œ๋”๋ง ์ตœ์ ํ™”
264
  video = render_utils.render_video(
265
  outputs['gaussian'][0],
266
- num_frames=30, # 프레임 수 감소
267
- resolution=384 # 해상도 제한
268
  )['color']
269
 
270
  video_geo = render_utils.render_video(
@@ -278,7 +273,7 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
278
  video_geo = [v.cpu().numpy() for v in video_geo]
279
  clear_gpu_memory()
280
 
281
- # 나머지 처리
282
  video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
283
  new_trial_id = str(uuid.uuid4())
284
  video_path = f"{TMP_DIR}/{new_trial_id}.mp4"
@@ -294,20 +289,11 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
294
 
295
  except Exception as e:
296
  print(f"Error in image_to_3d: {str(e)}")
297
- g.trellis_pipeline.to('cpu')
 
298
  clear_gpu_memory()
299
  return None, None
300
 
301
- return generated_image
302
- except Exception as e:
303
- print(f"Error in image generation: {str(e)}")
304
- return None
305
- finally:
306
- if torch.cuda.is_available():
307
- torch.cuda.empty_cache()
308
- torch.cuda.synchronize()
309
- gc.collect()
310
-
311
  def clear_gpu_memory():
312
  """GPU 메모리를 더 철저하게 정리하는 함수"""
313
  try:
@@ -377,7 +363,6 @@ def text_to_image(prompt: str, height: int, width: int, steps: int, scales: floa
377
  return any(ord('가') <= ord(c) <= ord('힣') for c in text)
378
 
379
  if contains_korean(prompt):
380
- # Helsinki-NLP/opus-mt-ko-en 모델을 사용하여 번역
381
  translated = g.translator(prompt)[0]['translation_text']
382
  prompt = translated
383
  print(f"Translated prompt: {prompt}")
@@ -418,16 +403,6 @@ def text_to_image(prompt: str, height: int, width: int, steps: int, scales: floa
418
  torch.cuda.empty_cache()
419
  torch.cuda.synchronize()
420
  gc.collect()
421
-
422
- return generated_image
423
- except Exception as e:
424
- print(f"Error in image generation: {str(e)}")
425
- return None
426
- finally:
427
- if torch.cuda.is_available():
428
- torch.cuda.empty_cache()
429
- torch.cuda.synchronize()
430
- gc.collect()
431
 
432
  css = """
433
  footer {
 
103
  # CUDA 초기화 방지
104
  torch.set_grad_enabled(False)
105
 
 
 
 
 
 
106
  # Hugging Face ํ† ํฐ ์„ค์ •
107
  HF_TOKEN = os.getenv("HF_TOKEN")
108
  if HF_TOKEN is None:
 
241
  formats=["gaussian", "mesh"],
242
  preprocess_image=False,
243
  sparse_structure_sampler_params={
244
+ "steps": min(ss_sampling_steps, 8),
245
  "cfg_strength": ss_guidance_strength,
246
+ "batch_size": 1
247
  },
248
  slat_sampler_params={
249
+ "steps": min(slat_sampling_steps, 8),
250
  "cfg_strength": slat_guidance_strength,
251
  "batch_size": 1
252
  },
 
258
  # ๋น„๋””์˜ค ๋ Œ๋”๋ง ์ตœ์ ํ™”
259
  video = render_utils.render_video(
260
  outputs['gaussian'][0],
261
+ num_frames=30,
262
+ resolution=384
263
  )['color']
264
 
265
  video_geo = render_utils.render_video(
 
273
  video_geo = [v.cpu().numpy() for v in video_geo]
274
  clear_gpu_memory()
275
 
276
+ # ๋น„๋””์˜ค ์ƒ์„ฑ ๋ฐ ์ €์žฅ
277
  video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
278
  new_trial_id = str(uuid.uuid4())
279
  video_path = f"{TMP_DIR}/{new_trial_id}.mp4"
 
289
 
290
  except Exception as e:
291
  print(f"Error in image_to_3d: {str(e)}")
292
+ if hasattr(g.trellis_pipeline, 'to'):
293
+ g.trellis_pipeline.to('cpu')
294
  clear_gpu_memory()
295
  return None, None
296
 
 
 
 
 
 
 
 
 
 
 
297
  def clear_gpu_memory():
298
  """GPU 메모리를 더 철저하게 정리하는 함수"""
299
  try:
 
363
  return any(ord('가') <= ord(c) <= ord('힣') for c in text)
364
 
365
  if contains_korean(prompt):
 
366
  translated = g.translator(prompt)[0]['translation_text']
367
  prompt = translated
368
  print(f"Translated prompt: {prompt}")
 
403
  torch.cuda.empty_cache()
404
  torch.cuda.synchronize()
405
  gc.collect()
 
 
 
 
 
 
 
 
 
 
406
 
407
  css = """
408
  footer {