theSure committed
Commit 374ad1a · verified · 1 parent: bd61158

Update app.py

Files changed (1): app.py (+8, -15)
app.py CHANGED
@@ -50,8 +50,9 @@ image_examples = [
 
 ]
 
-@spaces.GPU
+@spaces.GPU(enable_queue=True)
 def load_model(base_model_path, lora_path):
+    global pipe
     transformer = FluxTransformer2DModel.from_pretrained(base_model_path, subfolder='transformer', torch_dtype=torch.bfloat16)
     gr.Info(str(f"Model loading: {int((40 / 100) * 100)}%"))
     # enable image inputs
@@ -77,14 +78,13 @@ def load_model(base_model_path, lora_path):
         base_model_path,
         transformer=transformer,
         torch_dtype=torch.bfloat16
-    )
+    ).to("cuda")
     pipe.transformer.to(torch.bfloat16)
     gr.Info(str(f"Model loading: {int((80 / 100) * 100)}%"))
     gr.Info(str(f"Inject LoRA: {lora_path}"))
     pipe.load_lora_weights(lora_path, weight_name="pytorch_lora_weights.safetensors")
     gr.Info(str(f"Model loading: {int((100 / 100) * 100)}%"))
-    return pipe
-@spaces.GPU
+@spaces.GPU(enable_queue=True)
 def set_seed(seed):
     torch.manual_seed(seed)
     torch.cuda.manual_seed(seed)
@@ -92,9 +92,8 @@ def set_seed(seed):
     np.random.seed(seed)
     random.seed(seed)
 
-@spaces.GPU
+@spaces.GPU(enable_queue=True)
 def predict(
-    pipe,
     input_image,
     prompt,
     ddim_steps,
@@ -148,7 +147,6 @@ def predict(
         gray_image_pil = Image.fromarray(gray_image).convert('L')
     else:
         gray_image_pil = input_image["layers"][0]
-    pipe.to("cuda")
     result = pipe(
         prompt=prompt,
         control_image=input_image["background"].convert("RGB"),
@@ -182,7 +180,6 @@ def predict(
 
 
 def infer(
-    pipe,
     input_image,
     ddim_steps,
     seed,
@@ -192,8 +189,7 @@ def infer(
 ):
     img_path = image_path
     msk_path = mask_path
-    return predict(pipe,
-                   input_image,
+    return predict(input_image,
                    removal_prompt,
                    ddim_steps,
                    seed,
@@ -279,8 +275,7 @@ with gr.Blocks(
 ) as demo:
     base_model_path = 'black-forest-labs/FLUX.1-dev'
     lora_path = 'theSure/Omnieraser'
-    pipe = None
-    pipe = load_model(base_model_path=base_model_path, lora_path=lora_path)
+    load_model(base_model_path=base_model_path, lora_path=lora_path)
 
     ddim_steps = gr.Slider(visible=False, value=28)
     scale = gr.Slider(visible=False, value=3.5)
@@ -360,7 +355,6 @@ with gr.Blocks(
     run_button.click(
         fn=infer,
         inputs=[
-            pipe,
             input_image,
             ddim_steps,
             seed,
@@ -370,5 +364,4 @@ with gr.Blocks(
         outputs=[inpaint_result, gallery]
     )
 
-
-demo.launch()
+demo.launch()
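
For reference, a minimal sketch of the pattern this commit moves to: the pipeline is built once at module scope by load_model(), and the click handler reads that global instead of receiving the pipeline through the Gradio inputs list (which is meant to hold UI components, not a live pipeline object). Everything below is illustrative rather than the real app.py: the torch.nn.Identity stand-in, the Textbox components, and the function bodies are assumptions, and the @spaces.GPU(enable_queue=True) decorator from the diff is omitted so the sketch runs outside a ZeroGPU Space.

import torch
import gradio as gr

pipe = None  # assigned once by load_model(); read as a global by infer()


def load_model():
    # Stand-in for the real load_model(): instead of returning the pipeline,
    # it assigns the module-level global, mirroring the "global pipe" change.
    global pipe
    pipe = torch.nn.Identity()  # the Space builds a FLUX.1-dev pipeline here


def infer(prompt):
    # No pipe parameter any more: the handler uses the module-level pipeline.
    _ = pipe(torch.zeros(1))
    return f"ran with prompt: {prompt!r}"


with gr.Blocks() as demo:
    load_model()  # called once while the UI is being built, as in the commit
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    run_button = gr.Button("Run")
    # inputs holds only UI components; the pipeline is no longer listed here
    run_button.click(fn=infer, inputs=[prompt], outputs=[result])

demo.launch()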