Update app.py
app.py CHANGED
@@ -6,7 +6,6 @@ import torch
 import random
 import spaces
 import gradio as gr
-print(gr.__version__)
 import numpy as np
 
 from PIL import Image, ImageCms
@@ -16,8 +15,6 @@ from diffusers.utils import load_image
 from pipeline_flux_control_removal import FluxControlRemovalPipeline
 
 torch.set_grad_enabled(False)
-device = "cuda"
-print(device)
 image_path = mask_path = None
 image_examples = [...]
 image_path = mask_path =None
@@ -52,6 +49,7 @@ image_examples = [
 ]
 
 ]
+
 @spaces.GPU(duration=120)
 def load_model(base_model_path, lora_path):
     global pipe
@@ -80,13 +78,12 @@ def load_model(base_model_path, lora_path):
         base_model_path,
         transformer=transformer,
         torch_dtype=torch.bfloat16
-    ).to(
+    ).to("cuda")
     pipe.transformer.to(torch.bfloat16)
     gr.Info(str(f"Model loading: {int((80 / 100) * 100)}%"))
     gr.Info(str(f"Inject LoRA: {lora_path}"))
     pipe.load_lora_weights(lora_path, weight_name="pytorch_lora_weights.safetensors")
     gr.Info(str(f"Model loading: {int((100 / 100) * 100)}%"))
-
 @spaces.GPU(duration=120)
 def set_seed(seed):
     torch.manual_seed(seed)
@@ -95,7 +92,7 @@ def set_seed(seed):
     np.random.seed(seed)
     random.seed(seed)
 
-@spaces.GPU(duration=120)
+@spaces.GPU(duration=120)
 def predict(
     input_image,
     prompt,
@@ -276,7 +273,7 @@ with gr.Blocks(
     ),
     title="Omnieraser"
 ) as demo:
-    base_model_path =
+    base_model_path = 'black-forest-labs/FLUX.1-dev'
     lora_path = 'theSure/Omnieraser'
     load_model(base_model_path=base_model_path, lora_path=lora_path)
 
@@ -366,6 +363,6 @@ with gr.Blocks(
     ],
     outputs=[inpaint_result, gallery]
 )
-
+
 
 demo.launch()
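
Note: a minimal sketch of what the loading path amounts to after this commit. The pipeline construction, .to("cuda"), LoRA injection, and the base_model_path/lora_path values mirror the hunks above; the FluxTransformer2DModel import and the transformer loading step are assumptions, since that part of app.py is not shown in this diff.

# Sketch only; not the Space's full app.py.
import torch
import spaces
from diffusers import FluxTransformer2DModel  # assumed source of `transformer` (not shown in the diff)
from pipeline_flux_control_removal import FluxControlRemovalPipeline

torch.set_grad_enabled(False)

@spaces.GPU(duration=120)
def load_model(base_model_path, lora_path):
    global pipe
    # Assumption: the transformer is loaded separately elsewhere in app.py.
    transformer = FluxTransformer2DModel.from_pretrained(
        base_model_path, subfolder="transformer", torch_dtype=torch.bfloat16
    )
    pipe = FluxControlRemovalPipeline.from_pretrained(
        base_model_path,
        transformer=transformer,
        torch_dtype=torch.bfloat16
    ).to("cuda")  # this commit moves the device onto the call, replacing the removed module-level `device = "cuda"`
    pipe.transformer.to(torch.bfloat16)
    pipe.load_lora_weights(lora_path, weight_name="pytorch_lora_weights.safetensors")

# Values this commit sets inside the gr.Blocks context:
base_model_path = 'black-forest-labs/FLUX.1-dev'
lora_path = 'theSure/Omnieraser'
load_model(base_model_path=base_model_path, lora_path=lora_path)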