import os
import random
import shutil
import tempfile
import time
import uuid
from typing import List, Optional, Union
from urllib.parse import urlparse

import requests
import torch
import gradio as gr
import spaces
from diffusers import DiffusionPipeline
from huggingface_hub import HfFileSystem, ModelCard
def calculate_shift(
image_seq_len,
base_seq_len: int = 256,
max_seq_len: int = 4096,
base_shift: float = 0.5,
max_shift: float = 1.16,
):
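    """Linearly interpolate the timestep-shift value `mu` for a given image
    sequence length, from `base_shift` at `base_seq_len` tokens up to
    `max_shift` at `max_seq_len` tokens."""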
m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
b = base_shift - m * base_seq_len
mu = image_seq_len * m + b
return mu
def save_image(img):
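    """Save a PIL image to a uniquely named PNG in the working directory and return the filename."""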
unique_name = str(uuid.uuid4()) + ".png"
img.save(unique_name)
return unique_name
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
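    """Return a fresh random seed when `randomize_seed` is set; otherwise pass `seed` through."""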
if randomize_seed:
seed = random.randint(0, MAX_SEED)
return seed
# Qwen Image pipeline with live preview capability
@torch.inference_mode()
def qwen_pipe_call_that_returns_an_iterable_of_images(
self,
prompt: Union[str, List[str]] = None,
negative_prompt: Optional[Union[str, List[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 4.0,
num_images_per_prompt: Optional[int] = 1,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "pil",
):
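    """Yield intermediate preview images followed by the final image.

    Every fifth step the pipeline is re-run from scratch with a truncated
    step count to produce a rough preview, then one full-length run
    produces the final result. Simple, but each preview costs an extra
    (shorter) denoising pass.
    """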
height = height or 1024
width = width or 1024
batch_size = 1 if isinstance(prompt, str) else len(prompt)
device = self._execution_device
# Generate intermediate images during the process
for i in range(num_inference_steps):
if i % 5 == 0: # Show progress every 5 steps
# Generate partial result
temp_result = self(
prompt=prompt,
negative_prompt=negative_prompt,
height=height,
width=width,
guidance_scale=guidance_scale,
num_inference_steps=max(1, i + 1),
num_images_per_prompt=num_images_per_prompt,
generator=generator,
output_type=output_type,
).images[0]
yield temp_result
torch.cuda.empty_cache()
# Final high-quality result
final_result = self(
prompt=prompt,
negative_prompt=negative_prompt,
height=height,
width=width,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
num_images_per_prompt=num_images_per_prompt,
generator=generator,
output_type=output_type,
).images[0]
yield final_result
loras = [
# Sample Qwen-compatible LoRAs
{
"image": "https://huggingface.co/prithivMLmods/Qwen-Image-Studio-Realism/resolve/main/images/2.png",
"title": "Studio Realism",
"repo": "prithivMLmods/Qwen-Image-Studio-Realism",
"weights": "qwen-studio-realism.safetensors",
"trigger_word": "Studio Realism"
},
{
"image": "https://huggingface.co/prithivMLmods/Qwen-Image-Sketch-Smudge/resolve/main/images/1.png",
"title": "Sketch Smudge",
"repo": "prithivMLmods/Qwen-Image-Sketch-Smudge",
"weights": "qwen-sketch-smudge.safetensors",
"trigger_word": "Sketch Smudge"
},
{
"image": "https://huggingface.co/prithivMLmods/Qwen-Image-Anime-LoRA/resolve/main/images/1.png",
"title": "Qwen Anime",
"repo": "prithivMLmods/Qwen-Image-Anime-LoRA",
"weights": "qwen-anime.safetensors",
"trigger_word": "Qwen Anime"
},
{
"image": "https://huggingface.co/prithivMLmods/Qwen-Image-Synthetic-Face/resolve/main/images/2.png",
"title": "Synthetic Face",
"repo": "prithivMLmods/Qwen-Image-Synthetic-Face",
"weights": "qwen-synthetic-face.safetensors",
"trigger_word": "Synthetic Face"
},
{
"image": "https://huggingface.co/prithivMLmods/Qwen-Image-Fragmented-Portraiture/resolve/main/images/3.png",
"title": "Fragmented Portraiture",
"repo": "prithivMLmods/Qwen-Image-Fragmented-Portraiture",
"weights": "qwen-fragmented-portraiture.safetensors",
"trigger_word": "Fragmented Portraiture"
},
]
# ---------------------------- Model Initialization ----------------------------
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "Qwen/Qwen-Image"
# Load Qwen Image pipeline
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
# Add aspect ratios for Qwen
aspect_ratios = {
"1:1": (1024, 1024),
"16:9": (1344, 768),
"9:16": (768, 1344),
"4:3": (1152, 896),
"3:4": (896, 1152),
"3:2": (1216, 832),
"2:3": (832, 1216)
}
MAX_SEED = 2**32-1
# Add the custom method to the pipeline
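# (function.__get__(instance) returns the function bound to that instance)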
pipe.qwen_pipe_call_that_returns_an_iterable_of_images = qwen_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
class calculateDuration:
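    """Context manager that measures and prints the wall-clock duration of the enclosed block."""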
def __init__(self, activity_name=""):
self.activity_name = activity_name
def __enter__(self):
self.start_time = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.end_time = time.time()
self.elapsed_time = self.end_time - self.start_time
if self.activity_name:
print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
else:
print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
def load_lora_opt(pipe, lora_input, weight_name=None):
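    """Load LoRA weights from a repo ID ("author/model"), a Hugging Face repo
    URL, or a direct file URL. Blob URLs are rewritten to resolve URLs and
    downloaded to a temporary directory before loading; `weight_name`
    optionally pins a specific .safetensors file inside a repo."""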
lora_input = lora_input.strip()
if not lora_input:
return
# If it's just an ID like "author/model"
if "/" in lora_input and not lora_input.startswith("http"):
        pipe.load_lora_weights(lora_input, weight_name=weight_name, adapter_name="default")
return
if lora_input.startswith("http"):
url = lora_input
# Repo page (no blob/resolve)
if "huggingface.co" in url and "/blob/" not in url and "/resolve/" not in url:
repo_id = urlparse(url).path.strip("/")
            pipe.load_lora_weights(repo_id, weight_name=weight_name, adapter_name="default")
return
# Blob link → convert to resolve link
if "/blob/" in url:
url = url.replace("/blob/", "/resolve/")
# Download direct file
tmp_dir = tempfile.mkdtemp()
local_path = os.path.join(tmp_dir, os.path.basename(urlparse(url).path))
try:
print(f"Downloading LoRA from {url}...")
resp = requests.get(url, stream=True)
resp.raise_for_status()
with open(local_path, "wb") as f:
for chunk in resp.iter_content(chunk_size=8192):
f.write(chunk)
print(f"Saved LoRA to {local_path}")
pipe.load_lora_weights(local_path, adapter_name="default")
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
def update_selection(evt: gr.SelectData, width, height):
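    """Handle a gallery selection: update the prompt placeholder, the info
    markdown, and (when the LoRA declares an aspect) the width/height."""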
selected_lora = loras[evt.index]
new_placeholder = f"Type a prompt for {selected_lora['title']}"
lora_repo = selected_lora["repo"]
updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅"
if "aspect" in selected_lora:
if selected_lora["aspect"] == "portrait":
width = 768
height = 1024
elif selected_lora["aspect"] == "landscape":
width = 1024
height = 768
else:
width = 1024
height = 1024
return (
gr.update(placeholder=new_placeholder),
updated_text,
evt.index,
width,
height,
)
@spaces.GPU(duration=120)
def generate_image(prompt_mash, negative_prompt, steps, seed, cfg_scale, width, height, lora_scale, progress):
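    """Run the live-preview generator on the GPU, yielding intermediate previews and finally the finished image."""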
pipe.to("cuda")
generator = torch.Generator(device="cuda").manual_seed(seed)
with calculateDuration("Generating image"):
# Generate image with live preview
for img in pipe.qwen_pipe_call_that_returns_an_iterable_of_images(
prompt=prompt_mash,
negative_prompt=negative_prompt,
num_inference_steps=steps,
guidance_scale=cfg_scale,
width=width,
height=height,
generator=generator,
):
yield img
def set_dimensions(ar):
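    """Map an aspect-ratio key to its preset width and height."""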
w, h = aspect_ratios[ar]
return gr.update(value=w), gr.update(value=h)
@spaces.GPU(duration=120)
def run_lora(prompt, negative_prompt, use_negative_prompt, aspect_ratio, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
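    """Validate the LoRA selection, compose the prompt with the trigger word,
    swap the selected LoRA adapter into the pipeline, and stream preview
    images together with a progress bar to the UI."""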
if selected_index is None:
raise gr.Error("You must select a LoRA before proceeding.🧨")
selected_lora = loras[selected_index]
lora_path = selected_lora["repo"]
trigger_word = selected_lora["trigger_word"]
    # The aspect-ratio selection overrides the width/height slider values
width, height = aspect_ratios[aspect_ratio]
if trigger_word:
if "trigger_position" in selected_lora:
if selected_lora["trigger_position"] == "prepend":
prompt_mash = f"{trigger_word} {prompt}"
else:
prompt_mash = f"{prompt} {trigger_word}"
else:
prompt_mash = f"{trigger_word} {prompt}"
else:
prompt_mash = prompt
# Handle negative prompt
final_negative_prompt = negative_prompt if use_negative_prompt else ""
with calculateDuration("Unloading LoRA"):
        # Clear any previously attached adapters.
        # get_list_adapters() returns a {component: [adapter names]} mapping,
        # so collect the adapter names before deleting.
        if hasattr(pipe, 'get_list_adapters'):
            adapter_names = set()
            for names in pipe.get_list_adapters().values():
                adapter_names.update(names)
            if adapter_names and hasattr(pipe, 'delete_adapters'):
                pipe.delete_adapters(list(adapter_names))
        if hasattr(pipe, 'disable_lora'):
            pipe.disable_lora()
# Load new LoRA weights
with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
weight_name = selected_lora.get("weights", None)
        load_lora_opt(pipe, lora_path, weight_name=weight_name)
if hasattr(pipe, 'set_adapters'):
pipe.set_adapters(["default"], adapter_weights=[lora_scale])
with calculateDuration("Randomizing seed"):
if randomize_seed:
seed = random.randint(0, MAX_SEED)
image_generator = generate_image(prompt_mash, final_negative_prompt, steps, seed, cfg_scale, width, height, lora_scale, progress)
    final_image = None
    step_counter = 0
    progress_bar = ""
for image in image_generator:
step_counter += 1
final_image = image
progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
yield image, seed, gr.update(value=progress_bar, visible=True)
yield final_image, seed, gr.update(value=progress_bar, visible=False)
def get_huggingface_safetensors(link):
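    """Inspect an "author/model" repo and return (title, repo ID,
    safetensors filename, trigger word, preview image URL)."""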
    split_link = link.split("/")
    if len(split_link) != 2:
        raise Exception("Invalid Hugging Face repository ID. Expected the format author/model.")
    model_card = ModelCard.load(link)
    base_model = model_card.data.get("base_model")
    print(f"Base model: {base_model}")
    # Only allow Qwen-based models
    if base_model and "qwen" not in str(base_model).lower():
        raise Exception("Not a Qwen-compatible LoRA!")
    image_path = (model_card.data.get("widget", None) or [{}])[0].get("output", {}).get("url", None)
    trigger_word = model_card.data.get("instance_prompt", "")
    image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
    safetensors_name = None
    fs = HfFileSystem()
    try:
        list_of_files = fs.ls(link, detail=False)
        for file in list_of_files:
            if file.endswith(".safetensors"):
                safetensors_name = file.split("/")[-1]
            if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
                image_elements = file.split("/")
                image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
    except Exception as e:
        print(e)
        gr.Warning("You must provide a valid link or Hugging Face repository with a *.safetensors LoRA")
        raise Exception("You must provide a valid link or Hugging Face repository with a *.safetensors LoRA")
    if safetensors_name is None:
        raise Exception("No *.safetensors file found in the repository.")
    return split_link[1], link, safetensors_name, trigger_word, image_url
def check_custom_model(link):
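    """Normalize a user-supplied URL or repo ID and fetch its LoRA metadata."""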
    if link.startswith("https://"):
        if "huggingface.co/" in link:
            link_split = link.split("huggingface.co/")
            return get_huggingface_safetensors(link_split[1])
        raise Exception("Only Hugging Face links are supported.")
    return get_huggingface_safetensors(link)
def add_custom_lora(custom_lora):
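    """Resolve a user-entered LoRA, build its info card, and register it in
    the global `loras` list (reusing an existing entry when present)."""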
global loras
if custom_lora:
try:
title, repo, path, trigger_word, image = check_custom_model(custom_lora)
print(f"Loaded custom LoRA: {repo}")
card = f'''
<div class="custom_lora_card">
<span>Loaded custom LoRA:</span>
<div class="card_internal">
<img src="{image}" />
<div>
<h3>{title}</h3>
<small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
</div>
</div>
</div>
'''
existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
            if existing_item_index is None:
new_item = {
"image": image,
"title": title,
"repo": repo,
"weights": path,
"trigger_word": trigger_word
}
print(new_item)
existing_item_index = len(loras)
loras.append(new_item)
return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
except Exception as e:
gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-Qwen compatible LoRA")
return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-Qwen compatible LoRA"), gr.update(visible=False), gr.update(), "", None, ""
else:
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
def remove_custom_lora():
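    """Hide the custom LoRA card and clear the current selection."""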
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
run_lora.zerogpu = True
css = '''
#gen_btn{height: 100%}
#gen_column{align-self: stretch}
#title{text-align: center}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.5em}
#gallery .grid-wrap{height: 10vh}
#lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
.card_internal{display: flex;height: 100px;margin-top: .5em}
.card_internal img{margin-right: 1em}
.styler{--form-gap-width: 0px !important}
#progress{height:30px}
#progress .generating{display:none}
.progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
.progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
'''
with gr.Blocks(theme="bethecloud/storj_theme", css=css, delete_cache=(120, 120)) as app:
title = gr.HTML("""<h1>Qwen Image LoRA DLC🥳</h1>""", elem_id="title",)
selected_index = gr.State(None)
with gr.Row():
with gr.Column(scale=3):
prompt = gr.Textbox(label="Prompt", lines=1, placeholder="✦︎ Choose the LoRA and type the prompt")
with gr.Column(scale=1, elem_id="gen_column"):
generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
with gr.Row():
with gr.Column():
selected_info = gr.Markdown("")
gallery = gr.Gallery(
[(item["image"], item["title"]) for item in loras],
label="Qwen LoRA Collection",
allow_preview=False,
columns=3,
elem_id="gallery",
show_share_button=False
)
with gr.Group():
custom_lora = gr.Textbox(label="Enter Custom Qwen LoRA", placeholder="prithivMLmods/Qwen-Image-Sketch-Smudge")
gr.Markdown("[Check the list of Qwen-compatible LoRAs](https://huggingface.co/models?search=qwen+lora)", elem_id="lora_list")
custom_lora_info = gr.HTML(visible=False)
custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
with gr.Column():
progress_bar = gr.Markdown(elem_id="progress", visible=False)
result = gr.Image(label="Generated Image", format="png")
with gr.Row():
aspect_ratio = gr.Dropdown(
label="Aspect Ratio",
choices=list(aspect_ratios.keys()),
value="1:1",
)
with gr.Row():
with gr.Accordion("Advanced Settings", open=False):
with gr.Row():
use_negative_prompt = gr.Checkbox(
label="Use negative prompt", value=True, visible=True
)
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=1,
placeholder="Enter a negative prompt",
value="text, watermark, copyright, blurry, low resolution",
visible=True,
)
with gr.Column():
with gr.Row():
cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=4.0)
steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=50)
with gr.Row():
width = gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024)
height = gr.Slider(label="Height", minimum=256, maximum=2048, step=64, value=1024)
with gr.Row():
randomize_seed = gr.Checkbox(True, label="Randomize seed")
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=2, step=0.01, value=1.0)
# Event handlers
gallery.select(
update_selection,
inputs=[width, height],
outputs=[prompt, selected_info, selected_index, width, height]
)
aspect_ratio.change(
fn=set_dimensions,
inputs=aspect_ratio,
outputs=[width, height]
)
use_negative_prompt.change(
fn=lambda x: gr.update(visible=x),
inputs=use_negative_prompt,
outputs=negative_prompt
)
custom_lora.input(
add_custom_lora,
inputs=[custom_lora],
outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
)
custom_lora_button.click(
remove_custom_lora,
outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
)
gr.on(
triggers=[generate_button.click, prompt.submit],
fn=run_lora,
inputs=[prompt, negative_prompt, use_negative_prompt, aspect_ratio, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
outputs=[result, seed, progress_bar]
)
app.queue()
app.launch(share=False, ssr_mode=False, show_error=True)