aiqtech commited on
Commit
9f57959
·
verified ยท
1 Parent(s): 53f998b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -39
app.py CHANGED
@@ -22,14 +22,14 @@ os.makedirs(TMP_DIR, exist_ok=True)
22
 
23
 
24
  # 메모리 관련 환경 변수
25
- os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:32' # 더 작은 값으로 설정
26
  os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
27
  os.environ['TORCH_HOME'] = '/tmp/torch_home'
28
  os.environ['HF_HOME'] = '/tmp/huggingface'
29
  os.environ['XDG_CACHE_HOME'] = '/tmp/cache'
30
  os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
31
- os.environ['CUDA_VISIBLE_DEVICES'] = '0'
32
- os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
33
 
34
  def initialize_models():
35
  global pipeline, translator, flux_pipe
@@ -40,25 +40,17 @@ def initialize_models():
40
  # 메모리 설정
41
  torch.backends.cudnn.benchmark = False
42
  torch.backends.cudnn.deterministic = True
43
- torch.backends.cuda.matmul.allow_tf32 = False
44
- torch.backends.cudnn.allow_tf32 = False
45
-
46
- # ์บ์‹œ ๋””๋ ‰ํ† ๋ฆฌ ์ƒ์„ฑ ๋ฐ ์ •๋ฆฌ
47
- for dir_path in ['/tmp/transformers_cache', '/tmp/torch_home', '/tmp/huggingface', '/tmp/cache']:
48
- os.makedirs(dir_path, exist_ok=True)
49
- for file in os.listdir(dir_path):
50
- try:
51
- os.remove(os.path.join(dir_path, file))
52
- except:
53
- pass
54
 
 
55
  # Trellis 파이프라인 초기화
56
  pipeline = TrellisImageTo3DPipeline.from_pretrained(
57
- "JeffreyXiang/TRELLIS-image-large",
58
- device_map="auto",
59
- torch_dtype=torch.float16 # 반정밀도 사용
60
  )
61
 
 
 
 
 
62
  # 번역기 초기화
63
  translator = translation_pipeline(
64
  "translation",
@@ -66,14 +58,17 @@ def initialize_models():
66
  device="cpu"
67
  )
68
 
 
 
 
 
69
  flux_pipe = None
70
- free_memory()
71
  print("Models initialized successfully")
72
  return True
73
 
74
  except Exception as e:
75
  print(f"Model initialization error: {str(e)}")
76
- free_memory()
77
  return False
78
 
79
  def get_flux_pipe():
@@ -84,11 +79,8 @@ def get_flux_pipe():
84
  free_memory()
85
  flux_pipe = FluxPipeline.from_pretrained(
86
  "black-forest-labs/FLUX.1-dev",
87
- device_map="auto",
88
- torch_dtype=torch.float16,
89
- variant="fp16",
90
- use_safetensors=True,
91
- low_cpu_mem_usage=True
92
  )
93
  except Exception as e:
94
  print(f"Error loading Flux pipeline: {e}")
@@ -142,6 +134,9 @@ def translate_if_korean(text):
142
  @spaces.GPU
143
  def preprocess_image(image: Image.Image) -> Tuple[str, Image.Image]:
144
  try:
 
 
 
145
  trial_id = str(uuid.uuid4())
146
 
147
  # 이미지가 너무 작은 경우 크기 조정
@@ -150,10 +145,18 @@ def preprocess_image(image: Image.Image) -> Tuple[str, Image.Image]:
150
  ratio = min_size / min(image.size)
151
  new_size = tuple(int(dim * ratio) for dim in image.size)
152
  image = image.resize(new_size, Image.LANCZOS)
 
 
 
 
 
 
 
 
153
 
154
- processed_image = pipeline.preprocess_image(image)
155
- processed_image.save(f"{TMP_DIR}/{trial_id}.png")
156
- return trial_id, processed_image
157
 
158
  except Exception as e:
159
  print(f"Error in preprocess_image: {str(e)}")
@@ -439,21 +442,52 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
439
 
440
  if __name__ == "__main__":
441
  import warnings
442
- warnings.filterwarnings('ignore') # 경고 메시지 무시
 
 
 
443
 
 
444
  free_memory()
445
 
446
- if not initialize_models():
447
- print("Failed to initialize models")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
448
  exit(1)
449
 
 
 
 
 
 
 
 
 
450
  # Gradio 앱 실행
451
- demo.queue(max_size=1).launch( # 큐 크기를 1로 제한
452
- share=True,
453
- max_threads=1,
454
- show_error=True,
455
- enable_queue=True,
456
- server_port=7860,
457
- server_name="0.0.0.0",
458
- quiet=True # 로그 출력 최소화
459
- )
 
 
 
 
22
 
23
 
24
  # 메모리 관련 환경 변수
25
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:32'
26
  os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
27
  os.environ['TORCH_HOME'] = '/tmp/torch_home'
28
  os.environ['HF_HOME'] = '/tmp/huggingface'
29
  os.environ['XDG_CACHE_HOME'] = '/tmp/cache'
30
  os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
31
+ os.environ['SPCONV_ALGO'] = 'native'
32
+ os.environ['WARP_USE_CPU'] = '1'
33
 
34
  def initialize_models():
35
  global pipeline, translator, flux_pipe
 
40
  # 메모리 설정
41
  torch.backends.cudnn.benchmark = False
42
  torch.backends.cudnn.deterministic = True
 
 
 
 
 
 
 
 
 
 
 
43
 
44
+ print("Initializing Trellis pipeline...")
45
  # Trellis 파이프라인 초기화
46
  pipeline = TrellisImageTo3DPipeline.from_pretrained(
47
+ "JeffreyXiang/TRELLIS-image-large"
 
 
48
  )
49
 
50
+ if pipeline is None:
51
+ raise Exception("Failed to initialize Trellis pipeline")
52
+
53
+ print("Initializing translator...")
54
  # 번역기 초기화
55
  translator = translation_pipeline(
56
  "translation",
 
58
  device="cpu"
59
  )
60
 
61
+ if translator is None:
62
+ raise Exception("Failed to initialize translator")
63
+
64
+ # Flux 파이프라인은 나중에 초기화
65
  flux_pipe = None
66
+
67
  print("Models initialized successfully")
68
  return True
69
 
70
  except Exception as e:
71
  print(f"Model initialization error: {str(e)}")
 
72
  return False
73
 
74
  def get_flux_pipe():
 
79
  free_memory()
80
  flux_pipe = FluxPipeline.from_pretrained(
81
  "black-forest-labs/FLUX.1-dev",
82
+ torch_dtype=torch.float32, # CPU 모드로 시작
83
+ use_safetensors=True
 
 
 
84
  )
85
  except Exception as e:
86
  print(f"Error loading Flux pipeline: {e}")
 
134
  @spaces.GPU
135
  def preprocess_image(image: Image.Image) -> Tuple[str, Image.Image]:
136
  try:
137
+ if pipeline is None:
138
+ raise Exception("Pipeline not initialized")
139
+
140
  trial_id = str(uuid.uuid4())
141
 
142
  # 이미지가 너무 작은 경우 크기 조정
 
145
  ratio = min_size / min(image.size)
146
  new_size = tuple(int(dim * ratio) for dim in image.size)
147
  image = image.resize(new_size, Image.LANCZOS)
148
+
149
+ try:
150
+ processed_image = pipeline.preprocess_image(image)
151
+ if processed_image is None:
152
+ raise Exception("Failed to process image")
153
+
154
+ processed_image.save(f"{TMP_DIR}/{trial_id}.png")
155
+ return trial_id, processed_image
156
 
157
+ except Exception as e:
158
+ print(f"Error in image preprocessing: {str(e)}")
159
+ return None, None
160
 
161
  except Exception as e:
162
  print(f"Error in preprocess_image: {str(e)}")
 
442
 
443
  if __name__ == "__main__":
444
  import warnings
445
+ warnings.filterwarnings('ignore')
446
+
447
+ # 디렉토리 생성
448
+ os.makedirs(TMP_DIR, exist_ok=True)
449
 
450
+ # 메모리 정리
451
  free_memory()
452
 
453
+ # ๋ชจ๋ธ ์ดˆ๊ธฐํ™” ์‹œ๋„
454
+ retry_count = 3
455
+ initialized = False
456
+
457
+ for i in range(retry_count):
458
+ try:
459
+ if initialize_models():
460
+ initialized = True
461
+ break
462
+ else:
463
+ print(f"Initialization attempt {i+1} failed, retrying...")
464
+ free_memory()
465
+ except Exception as e:
466
+ print(f"Error during initialization attempt {i+1}: {str(e)}")
467
+ free_memory()
468
+
469
+ if not initialized:
470
+ print("Failed to initialize models after multiple attempts")
471
  exit(1)
472
 
473
+ try:
474
+ # rembg 사전 로드 시도
475
+ test_image = Image.fromarray(np.ones((32, 32, 3), dtype=np.uint8) * 255)
476
+ if pipeline is not None:
477
+ pipeline.preprocess_image(test_image)
478
+ except Exception as e:
479
+ print(f"Warning: Failed to preload rembg: {str(e)}")
480
+
481
  # Gradio 앱 실행
482
+ try:
483
+ demo.queue(max_size=1).launch(
484
+ share=True,
485
+ max_threads=1,
486
+ show_error=True,
487
+ server_port=7860,
488
+ server_name="0.0.0.0",
489
+ quiet=True
490
+ )
491
+ except Exception as e:
492
+ print(f"Error launching Gradio app: {str(e)}")
493
+ exit(1)