# finalProduct/app.py
import spaces
import gradio as gr
import numpy as np
import os
import random
import json
from PIL import Image
import torch
from torchvision import transforms
import zipfile
import cv2  # OpenCV, used for mask thresholding and Gaussian blur
from diffusers import FluxFillPipeline, AutoencoderKL
# from samgeo.text_sam import LangSAM
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# sam = LangSAM(model_type="sam2-hiera-large").to(device)
# Initialize a standalone VAE used to encode the processed input image into latents
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to("cuda")
pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to("cuda")
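# Load the LoRA registry (model name -> weights repo/path) used to populate the dropdown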
with open("lora_models.json", "r") as f:
lora_models = json.load(f)
def download_model(model_name, model_path):
print(f"Downloading model: {model_name} from {model_path}")
try:
pipe.load_lora_weights(model_path)
print(f"Successfully downloaded model: {model_name}")
except Exception as e:
print(f"Failed to download model: {model_name}. Error: {e}")
# Iterate through the models and download each one
for model_name, model_path in lora_models.items():
download_model(model_name, model_path)
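# Add a "None" entry so the dropdown offers a no-LoRA option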
lora_models["None"] = None
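# Resize helper: targets 1024 px on the longer side, clamps the aspect ratio to the 9:16..16:9 range,
# and snaps both sides to multiples of 8 (e.g. a 3000x2000 input maps to 1024x680).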
def calculate_optimal_dimensions(image: Image.Image):
# Extract the original dimensions
original_width, original_height = image.size
# Set constants
MIN_ASPECT_RATIO = 9 / 16
MAX_ASPECT_RATIO = 16 / 9
FIXED_DIMENSION = 1024
# Calculate the aspect ratio of the original image
original_aspect_ratio = original_width / original_height
# Determine which dimension to fix
if original_aspect_ratio > 1: # Wider than tall
width = FIXED_DIMENSION
height = round(FIXED_DIMENSION / original_aspect_ratio)
else: # Taller than wide
height = FIXED_DIMENSION
width = round(FIXED_DIMENSION * original_aspect_ratio)
# Ensure dimensions are multiples of 8
width = (width // 8) * 8
height = (height // 8) * 8
# Enforce aspect ratio limits
calculated_aspect_ratio = width / height
    if calculated_aspect_ratio > MAX_ASPECT_RATIO:
        width = int(height * MAX_ASPECT_RATIO // 8) * 8
    elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
        height = int(width / MIN_ASPECT_RATIO // 8) * 8
# Ensure width and height remain above the minimum dimensions
width = max(width, 576) if width == FIXED_DIMENSION else width
height = max(height, 576) if height == FIXED_DIMENSION else height
return width, height
def process_unmasked_area(image, mask, blur_strength=25):
"""
Process the unmasked portion of the image to remove context while preserving the masked area
Args:
image: PIL Image - the original input image
mask: PIL Image - the mask with white (255) indicating the area to preserve
blur_strength: int - strength of blur to apply to unmasked regions
Returns:
PIL Image with unmasked regions processed
"""
# Convert PIL images to numpy arrays for OpenCV processing
img_np = np.array(image)
    mask_np = np.array(mask.convert("L"))  # collapse the mask to a single grayscale channel
# Ensure mask is binary (0 and 255)
_, mask_binary = cv2.threshold(mask_np, 127, 255, cv2.THRESH_BINARY)
# Create inverted mask (255 in areas we want to process)
mask_inv = cv2.bitwise_not(mask_binary)
# Apply strong blur to remove context in unmasked areas
blurred = cv2.GaussianBlur(img_np, (blur_strength, blur_strength), 0)
# Create the processed image
# Keep original pixels where mask is white (255)
# Use blurred pixels where mask is black (0)
processed_np = np.where(mask_binary[:, :, None] == 255, img_np, blurred)
# Convert back to PIL image
processed_image = Image.fromarray(processed_np)
return processed_image
def vae_encode(image):
    """
    Encode an image into the VAE latent space (a single forward pass; no step count applies).
    Args:
        image: PIL Image to encode
    Returns:
        Encoded latent representation
    """
# Convert PIL image to tensor
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
])
image_tensor = transform(image).unsqueeze(0).to("cuda")
    # Encode the image (AutoencoderKL.encode does not accept num_inference_steps)
    with torch.no_grad():
        latent = vae.encode(image_tensor).latent_dist.sample()
latent = latent * vae.config.scaling_factor
return latent
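# Main inference entry point; @spaces.GPU allocates up to 300 s of GPU time per call on Hugging Face Spaces (ZeroGPU)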
@spaces.GPU(duration=300)
def infer(edit_images, prompt, lora_model, strength, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
# pipe.enable_xformers_memory_efficient_attention()
    gr.Info("Inferring")
if lora_model != "None":
pipe.load_lora_weights(lora_models[lora_model])
pipe.enable_lora()
gr.Info("starting checks")
image = edit_images["background"]
mask = edit_images["layers"][0]
    if image is None:
gr.Info("Please upload an image.")
return None, None
width, height = calculate_optimal_dimensions(image)
if randomize_seed:
seed = random.randint(0, MAX_SEED)
# Process the unmasked portion to remove context
processed_image = process_unmasked_area(image, mask)
    # Encode the processed image into VAE latents
    image_latent = vae_encode(processed_image)
gr.Info("generating image")
image = pipe(
# Use the encoded image latent
mask_image_latent=image_latent,
prompt=prompt,
prompt_2=prompt,
image=processed_image,
mask_image=mask,
height=height,
width=width,
guidance_scale=guidance_scale,
# strength=strength,
num_inference_steps=num_inference_steps,
generator=torch.Generator(device='cuda').manual_seed(seed),
# generator=torch.Generator().manual_seed(seed),
# lora_scale=0.75 // not supported in this version
).images[0]
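    # Convert the result to RGB and keep a JPEG copy on disk before returning it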
output_image_jpg = image.convert("RGB")
output_image_jpg.save("output.jpg", "JPEG")
return output_image_jpg, seed
# return image, seed
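# Save the current result as a PNG so the UI can offer it as a downloadable file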
def download_image(image):
if isinstance(image, np.ndarray):
image = Image.fromarray(image)
image.save("output.png", "PNG")
return "output.png"
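# Bundle the result, source image, mask, and generation settings into output.zip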
def save_details(result, edit_image, prompt, lora_model, strength, seed, guidance_scale, num_inference_steps):
image = edit_image["background"]
mask = edit_image["layers"][0]
if isinstance(result, np.ndarray):
result = Image.fromarray(result)
if isinstance(image, np.ndarray):
image = Image.fromarray(image)
if isinstance(mask, np.ndarray):
mask = Image.fromarray(mask)
result.save("saved_result.png", "PNG")
image.save("saved_image.png", "PNG")
mask.save("saved_mask.png", "PNG")
details = {
"prompt": prompt,
"lora_model": lora_model,
"strength": strength,
"seed": seed,
"guidance_scale": guidance_scale,
"num_inference_steps": num_inference_steps
}
with open("details.json", "w") as f:
json.dump(details, f)
# Create a ZIP file
with zipfile.ZipFile("output.zip", "w") as zipf:
zipf.write("saved_result.png")
zipf.write("saved_image.png")
zipf.write("saved_mask.png")
zipf.write("details.json")
return "output.zip"
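# Feed a generated result back into the editor as the next inpainting source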
def set_image_as_inpaint(image):
return image
# def generate_mask(image, click_x, click_y):
# text_prompt = "face"
# mask = sam.predict(image, text_prompt, box_threshold=0.24, text_threshold=0.24)
# return mask
examples = [
"photography of a young woman, accent lighting, (front view:1.4), "
# "a tiny astronaut hatching from an egg on the moon",
# "a cat holding a sign that says hello world",
# "an anime illustration of a wiener schnitzel",
]
css="""
#col-container {
margin: 0 auto;
max-width: 1000px;
}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown(f"""# FLUX.1 [dev]
""")
with gr.Row():
with gr.Column():
edit_image = gr.ImageEditor(
label='Upload and draw mask for inpainting',
type='pil',
sources=["upload", "webcam"],
image_mode='RGB',
layers=False,
brush=gr.Brush(colors=["#FFFFFF"]),
# height=600
)
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=2,
placeholder="Enter your prompt",
container=False,
)
lora_model = gr.Dropdown(
label="Select LoRA Model",
choices=list(lora_models.keys()),
value="None",
)
run_button = gr.Button("Run")
result = gr.Image(label="Result", show_label=False)
with gr.Accordion("Advanced Settings", open=False):
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance Scale",
minimum=1,
maximum=30,
step=0.5,
                value=30,  # keep the default within the slider's 1-30 range
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=28,
)
with gr.Row():
strength = gr.Slider(
label="Strength",
minimum=0,
maximum=1,
step=0.01,
value=0.85,
)
# width = gr.Slider(
# label="width",
# minimum=512,
# maximum=3072,
# step=1,
# value=1024,
# )
# height = gr.Slider(
# label="height",
# minimum=512,
# maximum=3072,
# step=1,
# value=1024,
# )
gr.on(
triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[edit_image, prompt, lora_model, strength, seed, randomize_seed, guidance_scale, num_inference_steps],
        outputs=[result, seed]
)
download_button = gr.Button("Download Image as PNG")
set_inpaint_button = gr.Button("Set Image as Inpaint")
save_button = gr.Button("Save Details")
download_button.click(
fn=download_image,
inputs=[result],
outputs=gr.File(label="Download Image")
)
set_inpaint_button.click(
fn=set_image_as_inpaint,
inputs=[result],
outputs=[edit_image]
)
save_button.click(
fn=save_details,
inputs=[result, edit_image, prompt, lora_model, strength, seed, guidance_scale, num_inference_steps],
outputs=gr.File(label="Download/Save Status")
)
# edit_image.select(
# fn=generate_mask,
# inputs=[edit_image, gr.Number(), gr.Number()],
# outputs=[edit_image]
# )
# demo.launch()
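# Login credentials are read from environment variables (GRADIO_USERNAME / GRADIO_PASSWORD)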
PASSWORD = os.getenv("GRADIO_PASSWORD")
USERNAME = os.getenv("GRADIO_USERNAME")
# Authentication callback for the Gradio login screen
def authenticate(username, password):
    return username == USERNAME and password == PASSWORD
# Launch the app with authentication
demo.launch(debug=True, auth=authenticate)