ginipick committed on
Commit
aa47610
·
verified ·
1 Parent(s): c0f2e23

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -32
app.py CHANGED
@@ -32,6 +32,20 @@ def clear_memory():
32
  torch.cuda.synchronize()
33
  gc.collect()
34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  # ์ž๋™ ํ˜ผํ•ฉ ์ •๋ฐ€๋„(Automatic Mixed Precision) ์„ค์ •
36
  if torch.cuda.is_available():
37
  scaler = torch.amp.GradScaler('cuda')
@@ -89,6 +103,7 @@ gd_model = gd_model.to(device=device)
89
  assert isinstance(gd_model, GroundingDinoForObjectDetection)
90
 
91
 
 
92
  # FLUX ํŒŒ์ดํ”„๋ผ์ธ ์ดˆ๊ธฐํ™”
93
  pipe = FluxPipeline.from_pretrained(
94
  "black-forest-labs/FLUX.1-dev",
@@ -110,9 +125,9 @@ pipe.fuse_lora(lora_scale=0.125)
110
  # GPU 메모리 최적화
111
  if torch.cuda.is_available():
112
  pipe.to("cuda")
113
- pipe.enable_vae_slicing() # VAE 슬라이싱 활성화
114
-
115
-
116
 
117
  os.environ["CUDA_VISIBLE_DEVICES"] = "0" # 단일 GPU 사용
118
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512" # CUDA 메모리 할당 설정
@@ -203,40 +218,26 @@ def generate_background(prompt: str, aspect_ratio: str) -> Image.Image:
203
  height = int(height * ratio)
204
  width, height = adjust_size_to_multiple_of_8(width, height)
205
 
 
 
206
  with timer("Background generation"):
207
- with torch.inference_mode(): # inference_mode ์‚ฌ์šฉ
208
- with torch.cuda.amp.autocast():
209
- image = pipe(
210
- prompt=prompt,
211
- width=width,
212
- height=height,
213
- num_inference_steps=8,
214
- guidance_scale=4.0,
215
- max_length=77,
216
- ).images[0]
217
-
218
- clear_memory() # 즉시 메모리 정리
219
- return image
220
  except Exception as e:
221
  print(f"Background generation error: {str(e)}")
 
222
  return Image.new('RGB', (512, 512), 'white')
223
 
224
- # FLUX ํŒŒ์ดํ”„๋ผ์ธ ์ดˆ๊ธฐํ™” ๋ถ€๋ถ„ ์ˆ˜์ •
225
- pipe = FluxPipeline.from_pretrained(
226
- "black-forest-labs/FLUX.1-dev",
227
- torch_dtype=torch.float32, # bfloat16 대신 float32 사용
228
- use_auth_token=HF_TOKEN
229
- )
230
- pipe.load_lora_weights(
231
- hf_hub_download(
232
- "ByteDance/Hyper-SD",
233
- "Hyper-FLUX.1-dev-8steps-lora.safetensors",
234
- use_auth_token=HF_TOKEN
235
- )
236
- )
237
- pipe.fuse_lora(lora_scale=0.125)
238
- pipe.to(device=device)
239
-
240
 
241
  def create_position_grid():
242
  return """
 
32
  torch.cuda.synchronize()
33
  gc.collect()
34
 
35
+ # GPU 메모리 관리 설정
36
+ if torch.cuda.is_available():
37
+ torch.cuda.empty_cache()
38
+ torch.backends.cudnn.benchmark = True
39
+ torch.backends.cuda.matmul.allow_tf32 = True
40
+
41
+ # 메모리 분할 설정
42
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = (
43
+ "max_split_size_mb:128,"
44
+ "garbage_collection_threshold:0.8,"
45
+ "memory_fraction:0.9"
46
+ )
47
+
48
+
49
  # ์ž๋™ ํ˜ผํ•ฉ ์ •๋ฐ€๋„(Automatic Mixed Precision) ์„ค์ •
50
  if torch.cuda.is_available():
51
  scaler = torch.amp.GradScaler('cuda')
 
103
  assert isinstance(gd_model, GroundingDinoForObjectDetection)
104
 
105
 
106
+ # FLUX ํŒŒ์ดํ”„๋ผ์ธ ์ดˆ๊ธฐํ™”
107
  # FLUX ํŒŒ์ดํ”„๋ผ์ธ ์ดˆ๊ธฐํ™”
108
  pipe = FluxPipeline.from_pretrained(
109
  "black-forest-labs/FLUX.1-dev",
 
125
  # GPU 메모리 최적화
126
  if torch.cuda.is_available():
127
  pipe.to("cuda")
128
+ # ๋ฉ”๋ชจ๋ฆฌ ์ตœ์ ํ™”๋ฅผ ์œ„ํ•œ ์ถ”๊ฐ€ ์„ค์ •
129
+ torch.backends.cudnn.benchmark = True
130
+ torch.backends.cuda.matmul.allow_tf32 = True
131
 
132
  os.environ["CUDA_VISIBLE_DEVICES"] = "0" # 단일 GPU 사용
133
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512" # CUDA 메모리 할당 설정
 
218
  height = int(height * ratio)
219
  width, height = adjust_size_to_multiple_of_8(width, height)
220
 
221
+ clear_memory() # ์ƒ์„ฑ ์ „ ๋ฉ”๋ชจ๋ฆฌ ์ •๋ฆฌ
222
+
223
  with timer("Background generation"):
224
+ with torch.inference_mode(), torch.cuda.amp.autocast():
225
+ image = pipe(
226
+ prompt=prompt,
227
+ width=width,
228
+ height=height,
229
+ num_inference_steps=8,
230
+ guidance_scale=4.0,
231
+ max_length=77,
232
+ ).images[0]
233
+
234
+ clear_memory() # ์ƒ์„ฑ ํ›„ ๋ฉ”๋ชจ๋ฆฌ ์ •๋ฆฌ
235
+ return image
 
236
  except Exception as e:
237
  print(f"Background generation error: {str(e)}")
238
+ clear_memory() # 오류 발생 시에도 메모리 정리
239
  return Image.new('RGB', (512, 512), 'white')
240
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
241
 
242
  def create_position_grid():
243
  return """