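"""FLUX.1-Fill inpainting demo: a Gradio app that regenerates a masked
region of an uploaded image from a text prompt, with optional LoRA
adapters, seed control, and a downloadable result bundle."""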
import spaces
import gradio as gr
import numpy as np
import os
import random
import json
from PIL import Image
import torch
import zipfile
from diffusers import FluxFillPipeline
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
# Initialize the pipeline
pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to("cuda")
# Load LoRA models from JSON
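# Expected shape of lora_models.json (illustrative example, not the real file):
#   {"My Style LoRA": "some-user/some-lora-repo", ...}
# Keys populate the dropdown; values are passed to pipe.load_lora_weights().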
with open("lora_models.json", "r") as f:
lora_models = json.load(f)
def download_model(model_name, model_path):
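    """Load a LoRA checkpoint into the pipeline once, so its weights are
    downloaded and cached ahead of inference time."""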
print(f"Downloading model: {model_name} from {model_path}")
try:
pipe.load_lora_weights(model_path)
print(f"Successfully downloaded model: {model_name}")
except Exception as e:
print(f"Failed to download model: {model_name}. Error: {e}")
# Pre-load each LoRA once so its weights are downloaded and cached locally
for model_name, model_path in lora_models.items():
download_model(model_name, model_path)
lora_models["None"] = None
def calculate_optimal_dimensions(image: Image.Image, scale_factor=1.0):
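    """Map an arbitrary input image to generation-friendly dimensions:
    the longer side is pinned near 1024 (times scale_factor), the aspect
    ratio is clamped to [9:16, 16:9], and both sides end up as multiples
    of 8 within [576, MAX_IMAGE_SIZE]."""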
# Extract the original dimensions
original_width, original_height = image.size
# Set constants
MIN_ASPECT_RATIO = 9 / 16
MAX_ASPECT_RATIO = 16 / 9
FIXED_DIMENSION = 1024
# Calculate the aspect ratio of the original image
original_aspect_ratio = original_width / original_height
# Determine which dimension to fix
if original_aspect_ratio > 1: # Wider than tall
width = FIXED_DIMENSION
height = round(FIXED_DIMENSION / original_aspect_ratio)
else: # Taller than wide
height = FIXED_DIMENSION
width = round(FIXED_DIMENSION * original_aspect_ratio)
# Apply scaling factor
width = round(width * scale_factor)
height = round(height * scale_factor)
# Ensure dimensions are multiples of 8
width = (width // 8) * 8
height = (height // 8) * 8
    # Enforce aspect ratio limits (cast to int: the // on floats above would
    # otherwise leave float dimensions, which the pipeline rejects)
    calculated_aspect_ratio = width / height
    if calculated_aspect_ratio > MAX_ASPECT_RATIO:
        width = int(height * MAX_ASPECT_RATIO) // 8 * 8
    elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
        height = int(width / MIN_ASPECT_RATIO) // 8 * 8
# Ensure width and height remain above the minimum dimensions
width = max(width, 576)
height = max(height, 576)
# Ensure dimensions don't exceed maximum
width = min(width, MAX_IMAGE_SIZE)
height = min(height, MAX_IMAGE_SIZE)
return width, height
@spaces.GPU(duration=300)
def infer(edit_images, prompt, lora_model, strength, seed=42, randomize_seed=False,
guidance_scale=3.5, num_inference_steps=28, lora_scale=0.75,
scale_factor=1.0, progress=gr.Progress(track_tqdm=True)):
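    """Run FLUX.1-Fill inpainting on the masked region of the edited image
    and return (result PIL image, seed used)."""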
gr.Info("Infering")
# Load and enable LoRA if selected
if lora_model != "None":
pipe.load_lora_weights(lora_models[lora_model])
pipe.enable_lora()
else:
pipe.disable_lora()
gr.Info("starting checks")
image = edit_images["background"]
mask = edit_images["layers"][0]
if not image:
gr.Info("Please upload an image.")
return None, None
# Calculate dimensions with scale factor
width, height = calculate_optimal_dimensions(image, scale_factor)
if randomize_seed:
seed = random.randint(0, MAX_SEED)
# Generate image
gr.Info(f"Generating image at {width}x{height}")
generator = torch.Generator(device='cuda').manual_seed(seed)
# Configure pipeline parameters
pipeline_kwargs = {
"prompt": prompt,
"prompt_2": prompt,
"image": image,
"mask_image": mask,
"height": height,
"width": width,
"guidance_scale": guidance_scale,
"strength": strength,
"num_inference_steps": num_inference_steps,
"generator": generator,
}
    # FLUX pipelines expose the LoRA scale via joint_attention_kwargs
    # (the FLUX counterpart of cross_attention_kwargs in UNet pipelines)
    if lora_model != "None":
        pipeline_kwargs["joint_attention_kwargs"] = {"scale": lora_scale}
# Run the pipeline
try:
output = pipe(**pipeline_kwargs)
result_image = output.images[0]
    except Exception as e:
        # gr.Error must be raised (not just constructed) to surface in the UI
        raise gr.Error(f"Error during generation: {e}")
    return result_image.convert("RGB"), seed
def download_image(image):
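    """Save the result as a PNG and return its path for Gradio's file output."""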
if isinstance(image, np.ndarray):
image = Image.fromarray(image)
image.save("output.png", "PNG")
return "output.png"
def save_details(result, edit_image, prompt, lora_model, strength, seed, guidance_scale,
num_inference_steps, lora_scale, scale_factor):
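    """Bundle the result, source image, mask, and generation settings
    into output.zip and return its path."""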
image = edit_image["background"]
mask = edit_image["layers"][0]
if isinstance(result, np.ndarray):
result = Image.fromarray(result)
if isinstance(image, np.ndarray):
image = Image.fromarray(image)
if isinstance(mask, np.ndarray):
mask = Image.fromarray(mask)
result.save("saved_result.png", "PNG")
image.save("saved_image.png", "PNG")
mask.save("saved_mask.png", "PNG")
details = {
"prompt": prompt,
"negative_prompt": negative_prompt,
"lora_model": lora_model,
"lora_scale": lora_scale,
"strength": strength,
"seed": seed,
"guidance_scale": guidance_scale,
"num_inference_steps": num_inference_steps,
"scale_factor": scale_factor,
"width": result.width,
"height": result.height
}
with open("details.json", "w") as f:
json.dump(details, f)
# Create a ZIP file
with zipfile.ZipFile("output.zip", "w") as zipf:
zipf.write("saved_result.png")
zipf.write("saved_image.png")
zipf.write("saved_mask.png")
zipf.write("details.json")
return "output.zip"
def set_image_as_inpaint(image):
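    """Feed the generated result back into the editor for iterative inpainting."""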
return image
# Example prompts (defined for reference; not currently wired into the UI)
examples = [
"photography of a young woman, accent lighting, (front view:1.4)",
]
css="""
#col-container {
margin: 0 auto;
max-width: 1000px;
}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
        gr.Markdown("# FLUX.1 [dev] Inpainting Tool")
with gr.Row():
with gr.Column():
edit_image = gr.ImageEditor(
label='Upload and draw mask for inpainting',
type='pil',
sources=["upload", "webcam"],
image_mode='RGB',
layers=False,
brush=gr.Brush(colors=["#FFFFFF"]),
)
prompt = gr.Text(
label="Prompt",
show_label=True,
max_lines=2,
placeholder="Enter your prompt",
container=True,
)
lora_model = gr.Dropdown(
label="Select LoRA Model",
choices=list(lora_models.keys()),
value="None",
)
run_button = gr.Button("Run")
result = gr.Image(label="Result", show_label=False)
with gr.Accordion("Advanced Settings", open=False):
with gr.Row():
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance Scale",
minimum=1,
maximum=30,
step=0.5,
value=3.5,
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=28,
)
with gr.Row():
strength = gr.Slider(
label="Strength",
minimum=0,
maximum=1,
step=0.01,
value=0.85,
)
lora_scale = gr.Slider(
label="LoRA Scale",
minimum=0,
maximum=2,
step=0.05,
value=0.75,
info="Controls the influence of the LoRA model"
)
with gr.Row():
scale_factor = gr.Slider(
label="Image Scale Factor",
minimum=0.5,
maximum=2.0,
step=0.1,
value=1.0,
info="Scale factor for image dimensions (1.0 = original, 2.0 = double size)"
)
gr.on(
triggers=[run_button.click, prompt.submit],
fn=infer,
inputs=[edit_image, prompt, lora_model, strength, seed, randomize_seed,
guidance_scale, num_inference_steps, lora_scale, scale_factor],
outputs=[result, seed]
)
download_button = gr.Button("Download Image as PNG")
set_inpaint_button = gr.Button("Set Image as Inpaint")
save_button = gr.Button("Save Details")
download_button.click(
fn=download_image,
inputs=[result],
outputs=gr.File(label="Download Image")
)
set_inpaint_button.click(
fn=set_image_as_inpaint,
inputs=[result],
outputs=[edit_image]
)
save_button.click(
fn=save_details,
inputs=[result, edit_image, prompt, lora_model, strength, seed, guidance_scale,
num_inference_steps, lora_scale, scale_factor],
outputs=gr.File(label="Download/Save Status")
)
# Get authentication credentials
PASSWORD = os.getenv("GRADIO_PASSWORD")
USERNAME = os.getenv("GRADIO_USERNAME")
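# On Hugging Face Spaces these are typically set as repository secrets.
# If either is unset, every login attempt will fail (None never matches).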
# Authentication callback for demo.launch
def authenticate(username, password):
    return username == USERNAME and password == PASSWORD
# Launch the app with authentication
demo.launch(debug=True, auth=authenticate)