Update app.py
app.py CHANGED
@@ -20,26 +20,43 @@ MAX_SEED = np.iinfo(np.int32).max
TMP_DIR = "/tmp/Trellis-demo"
os.makedirs(TMP_DIR, exist_ok=True)

+
# Memory-related environment variables
-os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:
+os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:32'  # set to a smaller value
os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
os.environ['TORCH_HOME'] = '/tmp/torch_home'
os.environ['HF_HOME'] = '/tmp/huggingface'
os.environ['XDG_CACHE_HOME'] = '/tmp/cache'
-os.environ['
-os.environ['
+os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'

def initialize_models():
    global pipeline, translator, flux_pipe

    try:
-
+        import torch
+
+        # Memory settings
+        torch.backends.cudnn.benchmark = False
+        torch.backends.cudnn.deterministic = True
+        torch.backends.cuda.matmul.allow_tf32 = False
+        torch.backends.cudnn.allow_tf32 = False
+
+        # Create and clean the cache directories
        for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home', '/tmp/huggingface', '/tmp/cache']:
            os.makedirs(dir_path, exist_ok=True)
+            for file in os.listdir(dir_path):
+                try:
+                    os.remove(os.path.join(dir_path, file))
+                except:
+                    pass

        # Initialize the Trellis pipeline
        pipeline = TrellisImageTo3DPipeline.from_pretrained(
-            "JeffreyXiang/TRELLIS-image-large"
+            "JeffreyXiang/TRELLIS-image-large",
+            device_map="auto",
+            torch_dtype=torch.float16  # use half precision
        )

        # Initialize the translator
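For context on the allocator line above: `PYTORCH_CUDA_ALLOC_CONF` is read once, when PyTorch's CUDA caching allocator first initializes, so it only takes effect if it is exported before anything touches the GPU. A minimal sketch of that ordering (an illustration, not code from this commit):

```python
import os

# Must be set before the first CUDA allocation, or the allocator ignores it.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:32'

import torch

if torch.cuda.is_available():
    x = torch.zeros(1, device='cuda')   # first allocation initializes the allocator
    print(torch.cuda.memory_summary())  # inspect cached vs. allocated blocks
```

`max_split_size_mb:32` forbids the allocator from splitting blocks larger than 32 MB, trading some throughput for less fragmentation on a small GPU.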
@@ -49,49 +66,64 @@ def initialize_models():
        device="cpu"
    )

-
-        flux_pipe = FluxPipeline.from_pretrained(
-            "black-forest-labs/FLUX.1-dev",
-            torch_dtype=torch.float32
-        )
-
+        flux_pipe = None
+        free_memory()
        print("Models initialized successfully")
        return True

    except Exception as e:
        print(f"Model initialization error: {str(e)}")
+        free_memory()
        return False

-def
-    """Load the Flux pipeline only when
+def get_flux_pipe():
+    """Load the Flux pipeline only when it is needed"""
    global flux_pipe
    if flux_pipe is None:
-
-
-
-
+        try:
+            free_memory()
+            flux_pipe = FluxPipeline.from_pretrained(
+                "black-forest-labs/FLUX.1-dev",
+                device_map="auto",
+                torch_dtype=torch.float16,
+                variant="fp16",
+                use_safetensors=True,
+                low_cpu_mem_usage=True
+            )
+        except Exception as e:
+            print(f"Error loading Flux pipeline: {e}")
+            return None
    return flux_pipe

+
def free_memory():
-    """
+    """Enhanced memory cleanup"""
    import gc
+    import os
+
+    # Python garbage collection
    gc.collect()

+    # Free CUDA memory
    if torch.cuda.is_available():
-
-        torch.cuda.empty_cache()
+        torch.cuda.empty_cache()

    # Clean up temporary files
-
+    tmp_dirs = ['/tmp/transformers_cache', '/tmp/torch_home',
+                '/tmp/huggingface', '/tmp/cache']
+
+    for dir_path in tmp_dirs:
        if os.path.exists(dir_path):
-
-
-
+            try:
+                for file in os.listdir(dir_path):
+                    file_path = os.path.join(dir_path, file)
                    if os.path.isfile(file_path):
-
-
-
-
+                        try:
+                            os.unlink(file_path)
+                        except:
+                            pass
+            except:
+                pass

@spaces.GPU
def setup_gpu_model(model):
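The new `get_flux_pipe()` above is a lazy singleton: the heavy pipeline is loaded on the first text-to-image request and cached in a global, so GPU memory stays free while the feature is idle. A stripped-down, self-contained sketch of the pattern, where `expensive_load` is a hypothetical stand-in for `FluxPipeline.from_pretrained`:

```python
from typing import Any, Optional

_pipe: Optional[Any] = None

def expensive_load() -> Any:
    # Hypothetical stand-in for FluxPipeline.from_pretrained(...)
    return object()

def get_pipe() -> Optional[Any]:
    """Return the shared pipeline, loading it at most once."""
    global _pipe
    if _pipe is None:
        try:
            _pipe = expensive_load()  # runs only on the first call
        except Exception as e:
            print(f"load failed: {e}")
            return None               # callers must handle the None case
    return _pipe
```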
@@ -230,11 +262,12 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
@spaces.GPU
def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
    try:
-
+        free_memory()  # free memory before starting

-
-
-
+        # Fetch the Flux pipeline
+        flux_pipe = get_flux_pipe()
+        if flux_pipe is None:
+            raise Exception("Failed to load Flux pipeline")

        # Limit the image size
        height = min(height, 512)
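The handler above calls `free_memory()` before doing any work. Inside that helper the order matters: `gc.collect()` must run before `torch.cuda.empty_cache()`, since the allocator can only hand cached blocks back to the driver once Python has dropped its references to the tensors that occupied them. A minimal standalone sketch of the same sequence:

```python
import gc
import torch

def release_gpu_memory() -> None:
    gc.collect()                      # drop unreachable Python tensor references first
    if torch.cuda.is_available():
        torch.cuda.empty_cache()      # then return cached blocks to the driver
        print(torch.cuda.memory_allocated(), torch.cuda.memory_reserved())
```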
@@ -245,28 +278,22 @@ def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
        translated_prompt = translate_if_korean(prompt)
        final_prompt = f"{translated_prompt}, {base_prompt}"

-
-
-        with torch.inference_mode():
+        with torch.inference_mode(), torch.cuda.amp.autocast():
            output = flux_pipe(
                prompt=[final_prompt],
                height=height,
                width=width,
-                guidance_scale=min(guidance_scale,
-                num_inference_steps=min(num_steps,
+                guidance_scale=min(guidance_scale, 7.5),  # cap at a lower value
+                num_inference_steps=min(num_steps, 20)  # limit the step count
            )
-
        image = output.images[0]

-
-        flux_pipe = flux_pipe.to("cpu")
-
+        free_memory()  # free memory when finished
        return image

    except Exception as e:
        print(f"Error in generate_image_from_text: {str(e)}")
-
-        flux_pipe = flux_pipe.to("cpu")
+        free_memory()
        raise e

@spaces.GPU
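In the hunk above, `torch.inference_mode()` turns off autograd bookkeeping while `torch.cuda.amp.autocast()` runs eligible ops in half precision; recent PyTorch releases deprecate the `torch.cuda.amp` spelling in favor of `torch.amp.autocast('cuda')`. A minimal sketch of the combination on a toy module, assuming a CUDA device:

```python
import torch

def generate(model: torch.nn.Module, x: torch.Tensor) -> torch.Tensor:
    # No gradients are recorded, and matmuls/convolutions run in fp16.
    with torch.inference_mode(), torch.amp.autocast('cuda'):
        return model(x)

model = torch.nn.Linear(8, 8).cuda()
print(generate(model, torch.randn(1, 8, device='cuda')).dtype)  # torch.float16
```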
@@ -411,24 +438,22 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:


if __name__ == "__main__":
-
+    import warnings
+    warnings.filterwarnings('ignore')  # suppress warning messages
+
    free_memory()

-    # Initialize the models
    if not initialize_models():
        print("Failed to initialize models")
        exit(1)

-    try:
-        # Try to preload rembg (with a very small image)
-        test_image = Image.fromarray(np.ones((32, 32, 3), dtype=np.uint8) * 255)
-        pipeline.preprocess_image(test_image)
-    except Exception as e:
-        print(f"Warning: Failed to preload rembg: {str(e)}")
-
    # Launch the Gradio app
-    demo.queue(max_size=
+    demo.queue(max_size=1).launch(  # limit the queue size to 1
        share=True,
        max_threads=1,
-        show_error=True
+        show_error=True,
+        enable_queue=True,
+        server_port=7860,
+        server_name="0.0.0.0",
+        quiet=True  # keep log output to a minimum
    )
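One caveat on the launch call: `enable_queue` is a Gradio 3.x parameter; it was removed in Gradio 4, where calling `demo.queue()` already enables queuing. If the Space were moved to Gradio 4, the equivalent launch would simply drop that flag — a sketch under that assumption, with everything else unchanged:

```python
# Gradio 4.x equivalent: queue(max_size=1) alone enables queuing.
demo.queue(max_size=1).launch(
    share=True,
    max_threads=1,
    show_error=True,
    server_port=7860,
    server_name="0.0.0.0",
    quiet=True,
)
```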