Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
@@ -20,11 +20,12 @@ MAX_SEED = np.iinfo(np.int32).max
|
|
20 |
TMP_DIR = "/tmp/Trellis-demo"
|
21 |
os.makedirs(TMP_DIR, exist_ok=True)
|
22 |
|
23 |
-
# GPU 메모리 관련 환경 변수
|
24 |
-
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:
|
25 |
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
|
26 |
-
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
|
27 |
os.environ['PYTORCH_NO_CUDA_MEMORY_CACHING'] = '1'
|
|
|
28 |
|
29 |
def initialize_models():
|
30 |
global pipeline, translator, flux_pipe
|
@@ -38,13 +39,15 @@ def initialize_models():
|
|
38 |
|
39 |
print("Initializing Trellis pipeline...")
|
40 |
pipeline = TrellisImageTo3DPipeline.from_pretrained(
|
41 |
-
"JeffreyXiang/TRELLIS-image-large"
|
42 |
-
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
|
43 |
)
|
44 |
|
45 |
if torch.cuda.is_available():
|
46 |
pipeline = pipeline.to("cuda")
|
47 |
-
|
|
|
|
|
|
|
48 |
|
49 |
print("Initializing translator...")
|
50 |
translator = translation_pipeline(
|
@@ -448,21 +451,15 @@ if __name__ == "__main__":
|
|
448 |
# 메모리 정리
|
449 |
free_memory()
|
450 |
|
451 |
-
# 모델 초기화
|
452 |
-
|
453 |
-
|
454 |
-
if initialize_models():
|
455 |
-
break
|
456 |
-
print(f"Attempt {attempt + 1} failed, retrying...")
|
457 |
-
free_memory()
|
458 |
-
else:
|
459 |
-
print("Failed to initialize models after multiple attempts")
|
460 |
exit(1)
|
461 |
|
462 |
# Gradio 앱 실행
|
463 |
-
demo.queue(max_size=
|
464 |
share=True,
|
465 |
-
max_threads=
|
466 |
show_error=True,
|
467 |
server_port=7860,
|
468 |
server_name="0.0.0.0"
|
|
|
20 |
TMP_DIR = "/tmp/Trellis-demo"
|
21 |
os.makedirs(TMP_DIR, exist_ok=True)
|
22 |
|
23 |
+
# GPU 메모리 관련 환경 변수
|
24 |
+
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'  # 더 작은 값으로 설정
|
25 |
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
|
26 |
+
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
|
27 |
os.environ['PYTORCH_NO_CUDA_MEMORY_CACHING'] = '1'
|
28 |
+
os.environ['CUDA_CACHE_DISABLE'] = '1'
|
29 |
|
30 |
def initialize_models():
|
31 |
global pipeline, translator, flux_pipe
|
|
|
39 |
|
40 |
print("Initializing Trellis pipeline...")
|
41 |
pipeline = TrellisImageTo3DPipeline.from_pretrained(
|
42 |
+
"JeffreyXiang/TRELLIS-image-large"
|
|
|
43 |
)
|
44 |
|
45 |
if torch.cuda.is_available():
|
46 |
pipeline = pipeline.to("cuda")
|
47 |
+
# 메모리 최적화를 위한 설정
|
48 |
+
for param in pipeline.parameters():
|
49 |
+
if param.dtype == torch.float32:
|
50 |
+
param.data = param.data.to(torch.float16)
|
51 |
|
52 |
print("Initializing translator...")
|
53 |
translator = translation_pipeline(
|
|
|
451 |
# 메모리 정리
|
452 |
free_memory()
|
453 |
|
454 |
+
# 모델 초기화
|
455 |
+
if not initialize_models():
|
456 |
+
print("Failed to initialize models")
|
|
|
|
|
|
|
|
|
|
|
|
|
457 |
exit(1)
|
458 |
|
459 |
# Gradio 앱 실행
|
460 |
+
demo.queue(max_size=1).launch(
|
461 |
share=True,
|
462 |
+
max_threads=2,  # 스레드 수 감소
|
463 |
show_error=True,
|
464 |
server_port=7860,
|
465 |
server_name="0.0.0.0"
|