fix
app.py
CHANGED
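This commit adds explicit diffusers/transformers imports, a bfl_repo constant for black-forest-labs/FLUX.1-dev, an explicit FluxInpaintPipeline load, a max_sequence_length=256 cap on the inference call, and concrete UI defaults (LoRA repo, weight file name, 28 inference steps).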
@@ -13,6 +13,10 @@ import os
 import time
 from gradio_imageslider import ImageSlider
 
+from diffusers import FlowMatchEulerDiscreteScheduler, AutoencoderKL
+from diffusers.models.transformers.transformer_flux import FluxTransformer2DModel
+from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
+
 
 MARKDOWN = """
 # FLUX.1 Inpainting with lora
@@ -25,7 +29,7 @@ HF_TOKEN = os.environ.get("HF_TOKEN")
 
 login(token=HF_TOKEN)
 
-
+bfl_repo="black-forest-labs/FLUX.1-dev"
 
 class calculateDuration:
     def __init__(self, activity_name=""):
@@ -64,8 +68,15 @@ def remove_background(image: Image.Image, threshold: int = 50) -> Image.Image:
     image.putdata(new_data)
     return image
 
-
-
+# text_encoder = CLIPTextModel.from_pretrained(os.path.join(os.getcwd(), "flux_text_encoders/clip_l.safetensors"), torch_dtype=dtype)
+# tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype)
+# text_encoder_2 = T5EncoderModel.from_pretrained(os.path.join(os.getcwd(), "flux_text_encoders/t5xxl_fp8_e4m3fn.safetensors"), torch_dtype=dtype)
+# tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2", torch_dtype=dtype)
+# vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype)
+# transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, subfolder="transformer", torch_dtype=dtype)
+
+
+pipe = FluxInpaintPipeline.from_pretrained(bfl_repo, torch_dtype=torch.bfloat16).to(DEVICE)
 
 
 def resize_image_dimensions(
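The commented-out block sketches loading each FLUX component individually. If that path is ever revived, note that from_pretrained expects a repo id or local directory, not a bare .safetensors file, so the single-file paths above would fail as written. A minimal sketch of per-component loading from the standard FLUX.1-dev subfolder layout, then handing the components to the pipeline (the dtype and device string are assumptions, and the gated repo requires prior authentication):

# A sketch only: load each component from its subfolder of the repo, then
# pass the pre-loaded components so from_pretrained does not fetch them again.
import torch
from diffusers import AutoencoderKL, FluxInpaintPipeline
from diffusers.models.transformers.transformer_flux import FluxTransformer2DModel
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast

bfl_repo = "black-forest-labs/FLUX.1-dev"
dtype = torch.bfloat16

text_encoder = CLIPTextModel.from_pretrained(bfl_repo, subfolder="text_encoder", torch_dtype=dtype)
tokenizer = CLIPTokenizer.from_pretrained(bfl_repo, subfolder="tokenizer")
text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype)
tokenizer_2 = T5TokenizerFast.from_pretrained(bfl_repo, subfolder="tokenizer_2")
vae = AutoencoderKL.from_pretrained(bfl_repo, subfolder="vae", torch_dtype=dtype)
transformer = FluxTransformer2DModel.from_pretrained(bfl_repo, subfolder="transformer", torch_dtype=dtype)

pipe = FluxInpaintPipeline.from_pretrained(
    bfl_repo,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    text_encoder_2=text_encoder_2,
    tokenizer_2=tokenizer_2,
    vae=vae,
    transformer=transformer,
    torch_dtype=dtype,
).to("cuda")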
@@ -144,6 +155,7 @@ def process(
         strength=strength_slider,
         generator=generator,
         num_inference_steps=num_inference_steps_slider,
+        max_sequence_length=256,
         joint_attention_kwargs={"scale": lora_scale},
     ).images[0]
 
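max_sequence_length bounds the number of prompt tokens handed to the T5 text encoder; the FLUX pipelines default to 512, so 256 roughly halves prompt-encoding memory and latency at the price of silently truncating long prompts.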
@@ -190,14 +202,14 @@ with gr.Blocks() as demo:
             max_lines=1,
             placeholder="Enter your model path",
             info="Currently, only LoRA hosted on Hugging Face'model can be loaded properly.",
-            value="XLabs-AI/flux-
+            value="XLabs-AI/flux-RealismLora"
         )
         lora_weights = gr.Textbox(
             label="Lora weights",
             show_label=True,
             max_lines=1,
             placeholder="Enter your lora weights name",
-            value="
+            value="lora.safetensors"
         )
         lora_scale = gr.Slider(
             label="Lora scale",
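The two textboxes now default to the XLabs realism LoRA repo and its weight file. Presumably the handler feeds them to diffusers' LoRA loader; a minimal sketch of that pattern, reusing the pipe from the loading sketch above (the variable names here are illustrative, not the app's):

# Sketch: attach a FLUX LoRA from the Hub, matching the two UI defaults.
lora_path = "XLabs-AI/flux-RealismLora"   # "model path" textbox default
lora_file = "lora.safetensors"            # "Lora weights" textbox default
pipe.load_lora_weights(lora_path, weight_name=lora_file)
# Per-image strength is then applied via joint_attention_kwargs={"scale": ...}
# in the pipeline call shown earlier.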
@@ -241,7 +253,7 @@ with gr.Blocks() as demo:
             minimum=1,
             maximum=50,
             step=1,
-            value=
+            value=28,
         )
     with gr.Column():
         output_image_component = ImageSlider(label="Generate image", type="pil", slider_color="pink")