import os
import torch
import gradio as gr
import numpy as np
from PIL import Image
from einops import rearrange
import io
import requests
import spaces
from huggingface_hub import login
from gradio_imageslider import ImageSlider
from diffusers.utils import load_image
from diffusers import FluxControlNetPipeline, FluxControlNetModel
# Source: https://github.com/XLabs-AI/x-flux.git
# `name`, `offload` and `is_schnell` are leftovers from the x-flux reference script; only `device` is used below.
name = "flux-dev"
device = torch.device("cuda")
offload = False
is_schnell = name == "flux-schnell"
base_model = 'black-forest-labs/FLUX.1-dev'
controlnet_model = 'InstantX/FLUX.1-dev-Controlnet-Union'
# Load the new ControlNet model and pipeline
controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
pipe = FluxControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16)
pipe.to(device)
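# The whole pipeline is kept in bfloat16 on the GPU; on smaller cards, pipe.enable_model_cpu_offload()
# could be used instead of pipe.to(device) to trade speed for VRAM (not done here).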
controlnet_conditioning_scale = 0.5
control_modes = {
    "canny": 0,
    "tile": 1,
    "depth": 2,
    "blur": 3,
    "pose": 4,
    "gray": 5,
    "lq": 6,
}
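# These integer indices are what the Union ControlNet expects in its `control_mode` argument;
# the mapping above is assumed to follow the order listed on the InstantX/FLUX.1-dev-Controlnet-Union model card.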
def load_and_convert_image(image):
    """Accept a file path, raw bytes, or a PIL image and return a PIL image."""
    if isinstance(image, str):
        image = Image.open(image)
    elif isinstance(image, bytes):
        image = Image.open(io.BytesIO(image))
    # AVIF inputs are converted to plain RGB so the pipeline can consume them
    if image.format == 'AVIF':
        image = image.convert("RGB")
    return image
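# Note: depending on the Pillow build, decoding AVIF may require an extra plugin
# (e.g. pillow-avif-plugin); the format check above assumes such support is available.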
def preprocess_image(image, target_width, target_height, crop=True):
    image = load_and_convert_image(image)
    if crop:
        original_width, original_height = image.size
        # Resize to match the target size without stretching
        scale = max(target_width / original_width, target_height / original_height)
        resized_width = int(scale * original_width)
        resized_height = int(scale * original_height)
        image = image.resize((resized_width, resized_height), Image.LANCZOS)
        # Center crop to match the target dimensions
        left = (resized_width - target_width) // 2
        top = (resized_height - target_height) // 2
        image = image.crop((left, top, left + target_width, top + target_height))
    else:
        image = image.resize((target_width, target_height), Image.LANCZOS)
    return image
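# @spaces.GPU requests a ZeroGPU device for each call (up to the 120 s duration set below);
# the decorated function only runs on GPU hardware while it executes.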
@spaces.GPU(duration=120)
def generate_image(prompt, control_image, control_mode, num_steps=50, guidance=4, width=512, height=512, seed=42, random_seed=False):
    if random_seed:
        seed = np.random.randint(0, 10000)
    if not os.path.isdir("./controlnet_results/"):
        os.makedirs("./controlnet_results/")
    control_image = preprocess_image(control_image, width, height)
    torch.manual_seed(seed)
    with torch.no_grad():
        image = pipe(
            prompt,
            control_image=control_image,
            control_mode=control_modes[control_mode],
            width=width,
            height=height,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            num_inference_steps=num_steps,
            guidance_scale=guidance,
        ).images[0]
    return [control_image, image]  # Return both images for the slider
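# Gradio UI: the inputs below map one-to-one onto generate_image's parameters, and the
# (control image, generated image) pair returned above feeds the before/after ImageSlider.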
interface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Image(type="pil", label="Control Image"),
        gr.Dropdown(choices=list(control_modes.keys()), label="Control Mode", value="canny"),
        gr.Slider(step=1, minimum=1, maximum=64, value=28, label="Num Steps"),
        gr.Slider(minimum=0.1, maximum=10, value=4, label="Guidance"),
        gr.Slider(minimum=128, maximum=2048, step=128, value=1024, label="Width"),
        gr.Slider(minimum=128, maximum=2048, step=128, value=1024, label="Height"),
        gr.Number(value=42, label="Seed"),
        gr.Checkbox(label="Random Seed"),
    ],
    outputs=ImageSlider(label="Before / After"),  # Use ImageSlider as the output
    title="FLUX.1 ControlNet Union",
    description="Generate images using ControlNet and a text prompt.\n[[non-commercial license, Flux.1 Dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]"
)
if __name__ == "__main__":
    interface.launch(share=True)