aiqtech committed
Commit 53f998b · verified · 1 Parent(s): b14b10a

Update app.py

Files changed (1)
  1. app.py +79 -54
app.py CHANGED
@@ -20,26 +20,43 @@ MAX_SEED = np.iinfo(np.int32).max
 TMP_DIR = "/tmp/Trellis-demo"
 os.makedirs(TMP_DIR, exist_ok=True)
 
+
 # Memory-related environment variables
-os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'
+os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:32'  # set to a smaller value
 os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
 os.environ['TORCH_HOME'] = '/tmp/torch_home'
 os.environ['HF_HOME'] = '/tmp/huggingface'
 os.environ['XDG_CACHE_HOME'] = '/tmp/cache'
-os.environ['SPCONV_ALGO'] = 'native'
-os.environ['WARP_USE_CPU'] = '1'
+os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
 
 def initialize_models():
     global pipeline, translator, flux_pipe
 
     try:
-        # Create the cache directories
+        import torch
+
+        # Memory settings
+        torch.backends.cudnn.benchmark = False
+        torch.backends.cudnn.deterministic = True
+        torch.backends.cuda.matmul.allow_tf32 = False
+        torch.backends.cudnn.allow_tf32 = False
+
+        # Create and clean the cache directories
         for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home', '/tmp/huggingface', '/tmp/cache']:
             os.makedirs(dir_path, exist_ok=True)
+            for file in os.listdir(dir_path):
+                try:
+                    os.remove(os.path.join(dir_path, file))
+                except:
+                    pass
 
         # Initialize the Trellis pipeline
         pipeline = TrellisImageTo3DPipeline.from_pretrained(
-            "JeffreyXiang/TRELLIS-image-large"
+            "JeffreyXiang/TRELLIS-image-large",
+            device_map="auto",
+            torch_dtype=torch.float16  # use half precision
         )
 
         # Initialize the translator
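An aside on the allocator change above: `max_split_size_mb` stops PyTorch's CUDA caching allocator from splitting cached blocks larger than the given size, trading some allocation flexibility for less fragmentation on memory-tight GPUs, and `CUDA_LAUNCH_BLOCKING=1` makes kernel launches synchronous so errors surface at the failing call (at a real speed cost). A minimal sketch for observing the allocator, with an illustrative tensor size:

import os
# Must be set before torch initializes CUDA.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:32'

import torch

if torch.cuda.is_available():
    x = torch.empty(256, 1024, 1024, device='cuda')  # ~1 GiB of float32
    del x
    torch.cuda.empty_cache()
    # memory_summary() reports reserved vs. active segments, showing how
    # blocks were split and cached under the configured limit.
    print(torch.cuda.memory_summary())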
@@ -49,49 +66,64 @@ def initialize_models():
             device="cpu"
         )
 
-        # Initialize the Flux pipeline (load immediately)
-        flux_pipe = FluxPipeline.from_pretrained(
-            "black-forest-labs/FLUX.1-dev",
-            torch_dtype=torch.float32
-        )
-
+        flux_pipe = None
+        free_memory()
         print("Models initialized successfully")
         return True
 
     except Exception as e:
         print(f"Model initialization error: {str(e)}")
+        free_memory()
         return False
 
-def load_flux_pipe():
-    """Load the Flux pipeline only when needed"""
+def get_flux_pipe():
+    """Load the Flux pipeline only when it is needed"""
     global flux_pipe
     if flux_pipe is None:
-        flux_pipe = FluxPipeline.from_pretrained(
-            "black-forest-labs/FLUX.1-dev",
-            torch_dtype=torch.float32
-        )
+        try:
+            free_memory()
+            flux_pipe = FluxPipeline.from_pretrained(
+                "black-forest-labs/FLUX.1-dev",
+                device_map="auto",
+                torch_dtype=torch.float16,
+                variant="fp16",
+                use_safetensors=True,
+                low_cpu_mem_usage=True
+            )
+        except Exception as e:
+            print(f"Error loading Flux pipeline: {e}")
+            return None
     return flux_pipe
 
+
 def free_memory():
-    """Utility function that cleans up memory"""
+    """Enhanced memory cleanup function"""
     import gc
+    import os
+
+    # Python garbage collection
     gc.collect()
 
+    # Release cached CUDA memory
     if torch.cuda.is_available():
-        with torch.cuda.device('cuda'):
-            torch.cuda.empty_cache()
+        torch.cuda.empty_cache()
 
     # Clean up temporary files
-    for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home', '/tmp/huggingface', '/tmp/cache']:
+    tmp_dirs = ['/tmp/transformers_cache', '/tmp/torch_home',
+                '/tmp/huggingface', '/tmp/cache']
+
+    for dir_path in tmp_dirs:
         if os.path.exists(dir_path):
-            for file in os.listdir(dir_path):
-                file_path = os.path.join(dir_path, file)
-                try:
+            try:
+                for file in os.listdir(dir_path):
+                    file_path = os.path.join(dir_path, file)
                     if os.path.isfile(file_path):
-                        os.unlink(file_path)
-                except Exception as e:
-                    print(f'Error deleting {file_path}: {e}')
-
+                        try:
+                            os.unlink(file_path)
+                        except:
+                            pass
+            except:
+                pass
 
 @spaces.GPU
 def setup_gpu_model(model):
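The replacement of load_flux_pipe() with get_flux_pipe() above is a lazy-singleton pattern: the heavyweight pipeline is built on first use, cached in a module-level global, and a load failure is reported as None instead of crashing at startup. A stripped-down sketch of the same pattern, with a hypothetical load_heavy_model() standing in for FluxPipeline.from_pretrained:

_model = None

def get_model():
    """Build the model on first call; reuse the cached instance afterwards."""
    global _model
    if _model is None:
        try:
            _model = load_heavy_model()  # hypothetical expensive loader
        except Exception as e:
            print(f"Error loading model: {e}")
            return None  # callers must check for None
    return _model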
@@ -230,11 +262,12 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
 @spaces.GPU
 def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
     try:
-        global flux_pipe
+        free_memory()  # clean up memory before starting
 
-        if torch.cuda.is_available():
-            flux_pipe = flux_pipe.to("cuda")
-            flux_pipe = flux_pipe.to(torch.float16)
+        # Fetch the Flux pipeline
+        flux_pipe = get_flux_pipe()
+        if flux_pipe is None:
+            raise Exception("Failed to load Flux pipeline")
 
         # Limit the image size
         height = min(height, 512)
@@ -245,28 +278,22 @@ def generate_image_from_text(prompt, height, width, guidance_scale, num_steps):
         translated_prompt = translate_if_korean(prompt)
         final_prompt = f"{translated_prompt}, {base_prompt}"
 
-        print(f"Generating image with prompt: {final_prompt}")
-
-        with torch.inference_mode():
+        with torch.inference_mode(), torch.cuda.amp.autocast():
             output = flux_pipe(
                 prompt=[final_prompt],
                 height=height,
                 width=width,
-                guidance_scale=min(guidance_scale, 10.0),
-                num_inference_steps=min(num_steps, 30)
+                guidance_scale=min(guidance_scale, 7.5),  # cap at a lower value
+                num_inference_steps=min(num_steps, 20)  # cap the step count
             )
-
         image = output.images[0]
 
-        if torch.cuda.is_available():
-            flux_pipe = flux_pipe.to("cpu")
-
+        free_memory()  # clean up memory when finished
         return image
 
     except Exception as e:
         print(f"Error in generate_image_from_text: {str(e)}")
-        if torch.cuda.is_available():
-            flux_pipe = flux_pipe.to("cpu")
+        free_memory()
         raise e
 
 @spaces.GPU
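The generation call now runs under torch.inference_mode() plus autocast, so autograd bookkeeping is skipped and eligible ops execute in half precision. A minimal sketch of the pattern, assuming `pipe` is any diffusers-style callable; note that torch.cuda.amp.autocast() still works but newer PyTorch releases prefer the spelling torch.autocast("cuda"):

import torch

def generate(pipe, prompt: str):
    # inference_mode() disables gradient tracking entirely; autocast
    # runs matmuls and convolutions in fp16 to cut activation memory.
    with torch.inference_mode(), torch.autocast("cuda"):
        return pipe(prompt=[prompt], num_inference_steps=20).images[0]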
@@ -411,24 +438,22 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
 
 
 if __name__ == "__main__":
-    # Clean up memory
+    import warnings
+    warnings.filterwarnings('ignore')  # suppress warning messages
+
     free_memory()
 
-    # Initialize the models
     if not initialize_models():
         print("Failed to initialize models")
         exit(1)
 
-    try:
-        # Try preloading rembg (with a very small image)
-        test_image = Image.fromarray(np.ones((32, 32, 3), dtype=np.uint8) * 255)
-        pipeline.preprocess_image(test_image)
-    except Exception as e:
-        print(f"Warning: Failed to preload rembg: {str(e)}")
-
     # Launch the Gradio app
-    demo.queue(max_size=3).launch(
+    demo.queue(max_size=1).launch(  # limit the queue size to 1
         share=True,
         max_threads=1,
-        show_error=True
+        show_error=True,
+        enable_queue=True,
+        server_port=7860,
+        server_name="0.0.0.0",
+        quiet=True  # minimize log output
     )
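One caveat on the launch block: enable_queue was a Gradio 3.x launch() flag, and later Gradio releases configure queueing exclusively through demo.queue() and no longer accept the kwarg. A sketch of the equivalent call without it, assuming `demo` is the gr.Blocks app built above:

demo.queue(max_size=1).launch(  # queueing is configured here
    share=True,
    max_threads=1,
    show_error=True,
    server_port=7860,
    server_name="0.0.0.0",
    quiet=True,  # minimize log output
)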