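"""FLUX.1 [schnell] Gradio demo with an experimental img2img path.

Text-to-image requests go straight through the FLUX pipeline; when an
initial image is supplied, it is encoded with the pipeline's VAE and
handed to the pipeline as starting latents.
"""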
import gradio as gr
import numpy as np
import random
import spaces
import torch
from PIL import Image
from torchvision import transforms
from diffusers import DiffusionPipeline
# Define constants
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048

# Load the diffusion pipeline
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
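
# FLUX.1 [schnell] is guidance-distilled for few-step sampling, which is why
# guidance_scale is pinned to 0.0 below. If the 12B model does not fit in
# GPU memory, diffusers' standard offload hook is an option (left disabled):
# pipe.enable_model_cpu_offload()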

def preprocess_image(image, image_size):
    print(f"Preprocessing image to size: {image_size}x{image_size}")
    preprocess = transforms.Compose([
        transforms.Resize((image_size, image_size)),  # Use model-specific size
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])  # Ensure this matches the VAE's training normalization
    ])
    image = preprocess(image).unsqueeze(0).to(device, dtype=dtype)
    print(f"Image shape after preprocessing: {image.shape}")
    return image
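
# Example: preprocess_image on a PIL RGB image with image_size=1024 returns
# a (1, 3, 1024, 1024) tensor on `device` in the pipeline's dtype.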

def encode_image(image, vae):
    print("Encoding image using the VAE")
    with torch.no_grad():
        latents = vae.encode(image).latent_dist.sample()
        # Use the VAE's own scaling; the hardcoded 0.18215 is the
        # Stable Diffusion 1.x constant, not FLUX's
        latents = (latents - vae.config.shift_factor) * vae.config.scaling_factor
    print(f"Latents shape after encoding: {latents.shape}")
    return latents
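
# For FLUX's VAE (16 latent channels, 8x spatial downsampling), a 1024x1024
# input therefore encodes to latents of shape (1, 16, 128, 128).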

@spaces.GPU  # required on ZeroGPU Spaces; matches the `spaces` import above
def infer(prompt, init_image=None, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
    print(f"Inference started with prompt: {prompt}")
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    print(f"Using seed: {seed}")
    generator = torch.Generator().manual_seed(seed)
    if init_image is None:
        print("No initial image provided, processing text2img")
        # Process text2img
        try:
            print("Calling the diffusion pipeline without latents")
            result = pipe(
                prompt=prompt,
                height=height,
                width=width,
                num_inference_steps=num_inference_steps,
                generator=generator,
                guidance_scale=0.0
            )
            # The pipeline output only carries decoded images; pass
            # output_type="latent" to the pipeline if latents are needed
            image = result.images[0]
        except Exception as e:
            print(f"Pipeline call failed with error: {e}")
            raise
    else:
        print("Initial image provided, processing img2img")
        vae_image_size = pipe.vae.config.sample_size
        print(f"Expected VAE image size: {vae_image_size}")
        init_image = init_image.convert("RGB")
        init_image = preprocess_image(init_image, vae_image_size)
        latents = encode_image(init_image, pipe.vae)

        # Interpolate latents to the target resolution (the VAE downsamples by 8)
        print(f"Interpolating latents to size: {(height // 8, width // 8)}")
        latents = torch.nn.functional.interpolate(latents, size=(height // 8, width // 8))
        print(f"Latents shape after interpolation: {latents.shape}")

        # Convert latent channels to the 64 the transformer expects.
        # NOTE: this 1x1 convolution is randomly initialized, so it does not
        # preserve the encoded image; see pack_latents below for the
        # 2x2-packing conversion the pipeline applies internally.
        latent_channels = pipe.vae.config.latent_channels
        print(f"Expected latent channels: 64, current latent channels: {latent_channels}")
        if latent_channels != 64:
            print(f"Converting latent channels from {latent_channels} to 64")
            conv = torch.nn.Conv2d(latent_channels, 64, kernel_size=1).to(device, dtype=dtype)
            with torch.no_grad():
                latents = conv(latents)
            print(f"Latents shape after channel conversion: {latents.shape}")
        # Reshape latents to the (batch, sequence, feature) layout the
        # transformer expects
        print(f"Latents shape before reshaping for transformer: {latents.shape}")
        latents = latents.permute(0, 2, 3, 1).contiguous().view(latents.shape[0], -1, 64)
        print(f"Latents shape after reshaping for transformer: {latents.shape}")
        # Note: pipe.transformer cannot be probed with latents alone (its
        # forward pass also needs timesteps, text embeddings and positional
        # ids), so the pipeline call below is left to drive it.
print("Calling the diffusion pipeline with latents") | |
image = pipe( | |
prompt=prompt, | |
height=height, | |
width=width, | |
num_inference_steps=num_inference_steps, | |
generator=generator, | |
guidance_scale=0.0, | |
latents=latents | |
).images[0] | |
print("Inference complete") | |
return image, seed | |
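
# --- Hedged sketch (not wired into infer above) ---------------------------
# The randomly initialized 1x1 Conv2d in the img2img path scrambles the
# encoded image rather than preserving it. Internally, FluxPipeline instead
# packs 2x2 patches of the 16 VAE channels into 64-channel tokens; a minimal
# re-implementation of that packing, assuming latents of shape (b, 16, h, w)
# with even h and w, would be:
def pack_latents(latents):
    b, c, h, w = latents.shape
    latents = latents.view(b, c, h // 2, 2, w // 2, 2)
    latents = latents.permute(0, 2, 4, 1, 3, 5)
    # (b, h//2 * w//2, 4c): for (1, 16, 128, 128) latents this is
    # (1, 4096, 64), the sequence length the pipeline itself prepares
    # for a 1024px image.
    return latents.reshape(b, (h // 2) * (w // 2), c * 4)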

# Define example prompts
examples = [
    "a tiny astronaut hatching from an egg on the moon",
    "a cat holding a sign that says hello world",
    "an anime illustration of a wiener schnitzel",
]

# CSS styling for the Japanese-inspired interface
css = """
body {
    background-color: #fff;
    font-family: 'Noto Sans JP', sans-serif;
    color: #333;
}
#col-container {
    margin: 0 auto;
    max-width: 520px;
    border: 2px solid #000;
    padding: 20px;
    background-color: #f7f7f7;
    border-radius: 10px;
}
.gr-button {
    background-color: #e60012;
    color: #fff;
    border: 2px solid #000;
}
.gr-button:hover {
    background-color: #c20010;
}
.gr-slider, .gr-checkbox, .gr-textbox {
    border: 2px solid #000;
}
.gr-accordion {
    border: 2px solid #000;
    background-color: #fff;
}
.gr-image {
    border: 2px solid #000;
}
"""

# Create the Gradio interface
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
        # FLUX.1 [schnell]
        12B param rectified flow transformer distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/) for 4 step generation
        [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-schnell)]
        """)
        with gr.Row():
            prompt = gr.Textbox(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        with gr.Row():
            init_image = gr.Image(label="Initial Image (optional)", type="pil")
            result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=42,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
            with gr.Row():
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=4,
                )
        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt],
            outputs=[result, seed],
            cache_examples="lazy"
        )
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, init_image, seed, randomize_seed, width, height, num_inference_steps],
        outputs=[result, seed]
    )

demo.launch()