aiqtech committed
Commit b3a304c · verified · 1 Parent(s): b38c2bf

Update app.py

Files changed (1): app.py (+35 -19)
app.py CHANGED
@@ -38,24 +38,34 @@ def initialize_models():
         torch.backends.cudnn.allow_tf32 = True
 
         print("Initializing Trellis pipeline...")
-        pipeline = TrellisImageTo3DPipeline.from_pretrained(
-            "JeffreyXiang/TRELLIS-image-large"
-        )
-
-        if torch.cuda.is_available():
-            pipeline = pipeline.to("cuda")
-            # Settings for memory optimization
-            for param in pipeline.parameters():
-                if param.dtype == torch.float32:
-                    param.data = param.data.to(torch.float16)
+        try:
+            pipeline = TrellisImageTo3DPipeline.from_pretrained(
+                "JeffreyXiang/TRELLIS-image-large"
+            )
+
+            if pipeline is None:
+                raise ValueError("Pipeline initialization returned None")
+
+            if torch.cuda.is_available():
+                pipeline = pipeline.to("cuda")
+                # Convert to half precision
+                pipeline = pipeline.half()
+
+        except Exception as e:
+            print(f"Error initializing Trellis pipeline: {str(e)}")
+            raise
 
         print("Initializing translator...")
-        translator = translation_pipeline(
-            "translation",
-            model="Helsinki-NLP/opus-mt-ko-en",
-            device=0 if torch.cuda.is_available() else -1
-        )
-
+        try:
+            translator = translation_pipeline(
+                "translation",
+                model="Helsinki-NLP/opus-mt-ko-en",
+                device=0 if torch.cuda.is_available() else -1
+            )
+        except Exception as e:
+            print(f"Error initializing translator: {str(e)}")
+            raise
+
         flux_pipe = None
 
         print("Models initialized successfully")
@@ -63,6 +73,7 @@ def initialize_models():
 
     except Exception as e:
         print(f"Model initialization error: {str(e)}")
+        free_memory()
         return False
 
 def get_flux_pipe():
@@ -94,10 +105,11 @@ def free_memory():
     # Clean up CUDA memory
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
+        torch.cuda.synchronize()
 
     # Clean up temporary files
     tmp_dirs = ['/tmp/transformers_cache', '/tmp/torch_home',
-                '/tmp/huggingface', '/tmp/cache']
+                '/tmp/huggingface', '/tmp/cache', TMP_DIR]
 
     for dir_path in tmp_dirs:
         if os.path.exists(dir_path):
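On this hunk: empty_cache() only returns the caching allocator's unused blocks to the driver, and synchronize() waits for in-flight kernels, so pairing them gives a more deterministic cleanup point. A minimal sketch of the whole pattern; the TMP_DIR path is a placeholder, and the gc.collect() and the makedirs afterwards are assumptions not visible in this hunk:

import gc
import os
import shutil

import torch

TMP_DIR = '/tmp/app_tmp'  # assumption: stand-in for the app's TMP_DIR

def free_memory():
    # Drop Python-level references first so CUDA blocks become freeable
    gc.collect()

    if torch.cuda.is_available():
        torch.cuda.empty_cache()   # return cached blocks to the driver
        torch.cuda.synchronize()   # wait for pending kernels to finish

    # Remove temp/cache directories, then recreate the app's own tmp dir
    for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home',
                     '/tmp/huggingface', '/tmp/cache', TMP_DIR]:
        if os.path.exists(dir_path):
            shutil.rmtree(dir_path, ignore_errors=True)
    os.makedirs(TMP_DIR, exist_ok=True)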
@@ -444,6 +456,9 @@ if __name__ == "__main__":
     if torch.cuda.is_available():
         print(f"Using GPU: {torch.cuda.get_device_name()}")
         print(f"Available GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")
+
+        # CUDA memory settings
+        torch.cuda.set_per_process_memory_fraction(0.8)  # Limit GPU memory usage
 
     # Create directories
     os.makedirs(TMP_DIR, exist_ok=True)
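On this hunk: set_per_process_memory_fraction caps what this process's caching allocator may take; allocations past the cap raise an out-of-memory error instead of consuming the whole card. A small self-contained illustration (the 0.8 fraction mirrors the diff; the budget printout is just for inspection):

import torch

if torch.cuda.is_available():
    # Cap this process at 80% of the device's total memory; allocations
    # beyond the cap raise an out-of-memory error.
    torch.cuda.set_per_process_memory_fraction(0.8)

    total = torch.cuda.get_device_properties(0).total_memory
    print(f"Per-process budget: {0.8 * total / 1e9:.2f} GB "
          f"of {total / 1e9:.2f} GB")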
@@ -459,8 +474,9 @@ if __name__ == "__main__":
     # Run the Gradio app
     demo.queue(max_size=1).launch(
         share=True,
-        max_threads=2,  # Reduce thread count
+        max_threads=2,
         show_error=True,
         server_port=7860,
-        server_name="0.0.0.0"
+        server_name="0.0.0.0",
+        enable_queue=True
     )
 
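One caveat on the last hunk: demo.queue() is what actually turns queuing on, so enable_queue=True is redundant, and recent Gradio releases dropped that launch() flag, where passing it would fail. A version-safe sketch of the same launch configuration, with a trivial stand-in UI:

import gradio as gr

with gr.Blocks() as demo:           # trivial stand-in for the app's UI
    gr.Markdown("placeholder UI")

# queue(max_size=1) already enables queuing and bounds pending requests;
# no enable_queue flag is needed at launch time.
demo.queue(max_size=1).launch(
    server_name="0.0.0.0",          # listen on all interfaces
    server_port=7860,
    share=True,
    show_error=True,
    max_threads=2,                  # small worker pool to limit memory use
)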