aiqtech committed on
Commit
c3ac9a5
·
verified ·
1 Parent(s): a7544c9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -14
app.py CHANGED
@@ -21,10 +21,12 @@ TMP_DIR = "/tmp/Trellis-demo"
21
  os.makedirs(TMP_DIR, exist_ok=True)
22
 
23
 
24
- # GPU 메모리 관련 환경 변수 수정
25
- os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:512' # A100에 맞게 증가
26
- os.environ['CUDA_VISIBLE_DEVICES'] = '0' # 단일 GPU 사용
27
- os.environ['CUDA_LAUNCH_BLOCKING'] = '0' # A100에서는 비동기 실행 허용
 
 
28
 
29
  def initialize_models():
30
  global pipeline, translator, flux_pipe
@@ -44,15 +46,13 @@ def initialize_models():
44
 
45
  if torch.cuda.is_available():
46
  pipeline = pipeline.to("cuda")
47
- # ๋ชจ๋ธ์„ FP16์œผ๋กœ ๋ณ€ํ™˜
48
- for param in pipeline.parameters():
49
- param.data = param.data.half()
50
 
51
  print("Initializing translator...")
52
  translator = translation_pipeline(
53
  "translation",
54
  model="Helsinki-NLP/opus-mt-ko-en",
55
- device="cuda"
56
  )
57
 
58
  # Flux ํŒŒ์ดํ”„๋ผ์ธ์€ ๋‚˜์ค‘์— ์ดˆ๊ธฐํ™”
@@ -74,9 +74,10 @@ def get_flux_pipe():
74
  flux_pipe = FluxPipeline.from_pretrained(
75
  "black-forest-labs/FLUX.1-dev",
76
  use_safetensors=True
77
- ).to("cuda")
78
- # FP16으로 변환
79
- flux_pipe.to(torch.float16)
 
80
  except Exception as e:
81
  print(f"Error loading Flux pipeline: {e}")
82
  return None
@@ -204,7 +205,7 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
204
  input_image = Image.open(f"{TMP_DIR}/{trial_id}.png")
205
 
206
  # L40S์— ๋งž๊ฒŒ ์ด๋ฏธ์ง€ ํฌ๊ธฐ ์ œํ•œ ์กฐ์ •
207
- max_size = 768 # L40S는 더 큰 이미지 처리 가능
208
  if max(input_image.size) > max_size:
209
  ratio = max_size / max(input_image.size)
210
  input_image = input_image.resize(
@@ -216,14 +217,14 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
216
  if torch.cuda.is_available():
217
  pipeline.to("cuda")
218
 
219
- with torch.cuda.amp.autocast(): # 자동 혼합 정밀도 사용
220
  outputs = pipeline.run(
221
  input_image,
222
  seed=seed,
223
  formats=["gaussian", "mesh"],
224
  preprocess_image=False,
225
  sparse_structure_sampler_params={
226
- "steps": min(ss_sampling_steps, 20), # L40S에서 더 많은 스텝 허용
227
  "cfg_strength": ss_guidance_strength,
228
  },
229
  slat_sampler_params={
@@ -231,6 +232,10 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
231
  "cfg_strength": slat_guidance_strength,
232
  }
233
  )
 
 
 
 
234
 
235
  # ๋น„๋””์˜ค ์ƒ์„ฑ
236
  video = render_utils.render_video(outputs['gaussian'][0], num_frames=40)['color']
 
21
  os.makedirs(TMP_DIR, exist_ok=True)
22
 
23
 
24
+ # GPU 메모리 관련 환경 변수
25
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:512'
26
+ os.environ['CUDA_VISIBLE_DEVICES'] = '0'
27
+ os.environ['CUDA_LAUNCH_BLOCKING'] = '0'
28
+ os.environ['PYTORCH_NO_CUDA_MEMORY_CACHING'] = '1'
29
+ os.environ['CUDA_CACHE_DISABLE'] = '1'
30
 
31
  def initialize_models():
32
  global pipeline, translator, flux_pipe
 
46
 
47
  if torch.cuda.is_available():
48
  pipeline = pipeline.to("cuda")
49
+ # FP16 ๋ณ€ํ™˜์€ ์ œ๊ฑฐ (pipeline์ด ์ž์ฒด์ ์œผ๋กœ ์ฒ˜๋ฆฌ)
 
 
50
 
51
  print("Initializing translator...")
52
  translator = translation_pipeline(
53
  "translation",
54
  model="Helsinki-NLP/opus-mt-ko-en",
55
+ device="cuda" if torch.cuda.is_available() else "cpu"
56
  )
57
 
58
  # Flux ํŒŒ์ดํ”„๋ผ์ธ์€ ๋‚˜์ค‘์— ์ดˆ๊ธฐํ™”
 
74
  flux_pipe = FluxPipeline.from_pretrained(
75
  "black-forest-labs/FLUX.1-dev",
76
  use_safetensors=True
77
+ )
78
+ if torch.cuda.is_available():
79
+ flux_pipe = flux_pipe.to("cuda")
80
+ flux_pipe.enable_model_cpu_offload() # CPU 오프로딩 활성화
81
  except Exception as e:
82
  print(f"Error loading Flux pipeline: {e}")
83
  return None
 
205
  input_image = Image.open(f"{TMP_DIR}/{trial_id}.png")
206
 
207
  # L40S์— ๋งž๊ฒŒ ์ด๋ฏธ์ง€ ํฌ๊ธฐ ์ œํ•œ ์กฐ์ •
208
+ max_size = 768
209
  if max(input_image.size) > max_size:
210
  ratio = max_size / max(input_image.size)
211
  input_image = input_image.resize(
 
217
  if torch.cuda.is_available():
218
  pipeline.to("cuda")
219
 
220
+ try:
221
  outputs = pipeline.run(
222
  input_image,
223
  seed=seed,
224
  formats=["gaussian", "mesh"],
225
  preprocess_image=False,
226
  sparse_structure_sampler_params={
227
+ "steps": min(ss_sampling_steps, 20),
228
  "cfg_strength": ss_guidance_strength,
229
  },
230
  slat_sampler_params={
 
232
  "cfg_strength": slat_guidance_strength,
233
  }
234
  )
235
+ except RuntimeError as e:
236
+ print(f"Runtime error in pipeline.run: {str(e)}")
237
+ free_memory()
238
+ raise e
239
 
240
  # ๋น„๋””์˜ค ์ƒ์„ฑ
241
  video = render_utils.render_video(outputs['gaussian'][0], num_frames=40)['color']