import os
import json
import copy
import time
import random
import logging
import numpy as np
from typing import Any, Dict, List, Optional, Union

import torch
from PIL import Image
import gradio as gr
import spaces
from diffusers import DiffusionPipeline
from huggingface_hub import (
    hf_hub_download,
    HfFileSystem,
    ModelCard,
    snapshot_download,
)
from diffusers.utils import load_image
import requests
from urllib.parse import urlparse
import tempfile
import shutil
import uuid
import zipfile


def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.16,
):
    # Linearly interpolate the timestep shift (mu) from the image sequence length.
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    mu = image_seq_len * m + b
    return mu


def save_image(img):
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # MAX_SEED is defined below at module scope and resolved at call time.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed


# Qwen Image pipeline with live preview capability
@torch.inference_mode()
def qwen_pipe_call_that_returns_an_iterable_of_images(
    self,
    prompt: Union[str, List[str]] = None,
    negative_prompt: Optional[Union[str, List[str]]] = None,
    height: Optional[int] = None,
    width: Optional[int] = None,
    num_inference_steps: int = 50,
    guidance_scale: float = 4.0,
    num_images_per_prompt: Optional[int] = 1,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    output_type: Optional[str] = "pil",
):
    height = height or 1024
    width = width or 1024
    batch_size = 1 if isinstance(prompt, str) else len(prompt)
    device = self._execution_device

    # Generate intermediate images during the process.
    # Note: each preview is a separate short pipeline call, so previews add
    # extra compute on top of the final full-quality pass.
    for i in range(num_inference_steps):
        if i % 5 == 0:  # Show progress every 5 steps
            # Generate partial result
            temp_result = self(
                prompt=prompt,
                negative_prompt=negative_prompt,
                height=height,
                width=width,
                guidance_scale=guidance_scale,
                num_inference_steps=max(1, i + 1),
                num_images_per_prompt=num_images_per_prompt,
                generator=generator,
                output_type=output_type,
            ).images[0]
            yield temp_result
            torch.cuda.empty_cache()

    # Final high-quality result
    final_result = self(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=num_images_per_prompt,
        generator=generator,
        output_type=output_type,
    ).images[0]
    yield final_result


loras = [
    # Sample Qwen-compatible LoRAs
    {
        "image": "https://huggingface.co/prithivMLmods/Qwen-Image-Studio-Realism/resolve/main/images/2.png",
        "title": "Studio Realism",
        "repo": "prithivMLmods/Qwen-Image-Studio-Realism",
        "weights": "qwen-studio-realism.safetensors",
        "trigger_word": "Studio Realism",
    },
    {
        "image": "https://huggingface.co/prithivMLmods/Qwen-Image-Sketch-Smudge/resolve/main/images/1.png",
        "title": "Sketch Smudge",
        "repo": "prithivMLmods/Qwen-Image-Sketch-Smudge",
        "weights": "qwen-sketch-smudge.safetensors",
        "trigger_word": "Sketch Smudge",
    },
    {
        "image": "https://huggingface.co/prithivMLmods/Qwen-Image-Anime-LoRA/resolve/main/images/1.png",
        "title": "Qwen Anime",
        "repo": "prithivMLmods/Qwen-Image-Anime-LoRA",
        "weights": "qwen-anime.safetensors",
        "trigger_word": "Qwen Anime",
    },
    {
        "image": "https://huggingface.co/prithivMLmods/Qwen-Image-Synthetic-Face/resolve/main/images/2.png",
        "title": "Synthetic Face",
        "repo": "prithivMLmods/Qwen-Image-Synthetic-Face",
        "weights": "qwen-synthetic-face.safetensors",
        "trigger_word": "Synthetic Face",
    },
    {
        "image": "https://huggingface.co/prithivMLmods/Qwen-Image-Fragmented-Portraiture/resolve/main/images/3.png",
"https://huggingface.co/prithivMLmods/Qwen-Image-Fragmented-Portraiture/resolve/main/images/3.png", "title": "Fragmented Portraiture", "repo": "prithivMLmods/Qwen-Image-Fragmented-Portraiture", "weights": "qwen-fragmented-portraiture.safetensors", "trigger_word": "Fragmented Portraiture" }, ] #--------------------------------------------------Model Initialization-----------------------------------------------------------------------------------------# dtype = torch.bfloat16 device = "cuda" if torch.cuda.is_available() else "cpu" base_model = "Qwen/Qwen-Image" # Load Qwen Image pipeline pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device) # Add aspect ratios for Qwen aspect_ratios = { "1:1": (1024, 1024), "16:9": (1344, 768), "9:16": (768, 1344), "4:3": (1152, 896), "3:4": (896, 1152), "3:2": (1216, 832), "2:3": (832, 1216) } MAX_SEED = 2**32-1 # Add the custom method to the pipeline pipe.qwen_pipe_call_that_returns_an_iterable_of_images = qwen_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) class calculateDuration: def __init__(self, activity_name=""): self.activity_name = activity_name def __enter__(self): self.start_time = time.time() return self def __exit__(self, exc_type, exc_value, traceback): self.end_time = time.time() self.elapsed_time = self.end_time - self.start_time if self.activity_name: print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds") else: print(f"Elapsed time: {self.elapsed_time:.6f} seconds") def load_lora_opt(pipe, lora_input): lora_input = lora_input.strip() if not lora_input: return # If it's just an ID like "author/model" if "/" in lora_input and not lora_input.startswith("http"): pipe.load_lora_weights(lora_input, adapter_name="default") return if lora_input.startswith("http"): url = lora_input # Repo page (no blob/resolve) if "huggingface.co" in url and "/blob/" not in url and "/resolve/" not in url: repo_id = urlparse(url).path.strip("/") pipe.load_lora_weights(repo_id, adapter_name="default") return # Blob link → convert to resolve link if "/blob/" in url: url = url.replace("/blob/", "/resolve/") # Download direct file tmp_dir = tempfile.mkdtemp() local_path = os.path.join(tmp_dir, os.path.basename(urlparse(url).path)) try: print(f"Downloading LoRA from {url}...") resp = requests.get(url, stream=True) resp.raise_for_status() with open(local_path, "wb") as f: for chunk in resp.iter_content(chunk_size=8192): f.write(chunk) print(f"Saved LoRA to {local_path}") pipe.load_lora_weights(local_path, adapter_name="default") finally: shutil.rmtree(tmp_dir, ignore_errors=True) def update_selection(evt: gr.SelectData, width, height): selected_lora = loras[evt.index] new_placeholder = f"Type a prompt for {selected_lora['title']}" lora_repo = selected_lora["repo"] updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅" if "aspect" in selected_lora: if selected_lora["aspect"] == "portrait": width = 768 height = 1024 elif selected_lora["aspect"] == "landscape": width = 1024 height = 768 else: width = 1024 height = 1024 return ( gr.update(placeholder=new_placeholder), updated_text, evt.index, width, height, ) @spaces.GPU(duration=120) def generate_image(prompt_mash, negative_prompt, steps, seed, cfg_scale, width, height, lora_scale, progress): pipe.to("cuda") generator = torch.Generator(device="cuda").manual_seed(seed) with calculateDuration("Generating image"): # Generate image with live preview for img in pipe.qwen_pipe_call_that_returns_an_iterable_of_images( prompt=prompt_mash, 
            negative_prompt=negative_prompt,
            num_inference_steps=steps,
            guidance_scale=cfg_scale,
            width=width,
            height=height,
            generator=generator,
        ):
            yield img


def set_dimensions(ar):
    w, h = aspect_ratios[ar]
    return gr.update(value=w), gr.update(value=h)


@spaces.GPU(duration=120)
def run_lora(prompt, negative_prompt, use_negative_prompt, aspect_ratio, cfg_scale, steps,
             selected_index, randomize_seed, seed, width, height, lora_scale,
             progress=gr.Progress(track_tqdm=True)):
    if selected_index is None:
        raise gr.Error("You must select a LoRA before proceeding.🧨")

    selected_lora = loras[selected_index]
    lora_path = selected_lora["repo"]
    trigger_word = selected_lora["trigger_word"]

    # Set dimensions based on aspect ratio
    width, height = aspect_ratios[aspect_ratio]

    if trigger_word:
        if "trigger_position" in selected_lora:
            if selected_lora["trigger_position"] == "prepend":
                prompt_mash = f"{trigger_word} {prompt}"
            else:
                prompt_mash = f"{prompt} {trigger_word}"
        else:
            prompt_mash = f"{trigger_word} {prompt}"
    else:
        prompt_mash = prompt

    # Handle negative prompt
    final_negative_prompt = negative_prompt if use_negative_prompt else ""

    with calculateDuration("Unloading LoRA"):
        # Clear existing adapters.
        # get_list_adapters() maps component name -> list of adapter names, so flatten it first.
        current_adapters = pipe.get_list_adapters() if hasattr(pipe, 'get_list_adapters') else {}
        adapter_names = {name for names in current_adapters.values() for name in names}
        for adapter in adapter_names:
            if hasattr(pipe, 'delete_adapters'):
                pipe.delete_adapters(adapter)
        if hasattr(pipe, 'disable_lora'):
            pipe.disable_lora()

    # Load new LoRA weights
    with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
        # The weight filename is available here, but load_lora_opt lets diffusers auto-detect it.
        weight_name = selected_lora.get("weights", None)
        load_lora_opt(pipe, lora_path)
        if hasattr(pipe, 'set_adapters'):
            pipe.set_adapters(["default"], adapter_weights=[lora_scale])

    with calculateDuration("Randomizing seed"):
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)

    image_generator = generate_image(prompt_mash, final_negative_prompt, steps, seed, cfg_scale,
                                     width, height, lora_scale, progress)

    final_image = None
    step_counter = 0
    for image in image_generator:
        step_counter += 1
        final_image = image
        # The original progress-bar markup was truncated in the source; the HTML below is an
        # assumed reconstruction of a simple CSS-variable-driven progress bar.
        progress_bar = (
            f'<div class="progress-container">'
            f'<div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div>'
            f'</div>'
        )
        yield image, seed, gr.update(value=progress_bar, visible=True)

    # Final yield with the completed image (output wiring assumed to match the per-step yields).
    yield final_image, seed, gr.update(visible=False)
"+trigger_word+"
as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}