import os
import random
import uuid
from typing import Tuple

import gradio as gr
import numpy as np
from PIL import Image
import spaces
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

title = """<h1>SDXL LoRA DLC 🤩</h1>"""

# Ensure the assets directory exists if it is needed for predefined images.
if not os.path.exists("assets"):
    print("Warning: 'assets' directory not found. Predefined gallery might be empty.")
    # Optionally create it: os.makedirs("assets")

def save_image(img):
    # Save generated images into an 'outputs' directory, created on demand.
    output_dir = "outputs"
    os.makedirs(output_dir, exist_ok=True)
    unique_name = os.path.join(output_dir, str(uuid.uuid4()) + ".png")
    img.save(unique_name)
    return unique_name

MAX_SEED = np.iinfo(np.int32).max  # Defined before randomize_seed_fn, which uses it.


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed


DESCRIPTIONz = ""  # Extra HTML notices shown under the title (e.g. the CPU warning below).
pipe = None  # Initialized only when CUDA is available.

if not torch.cuda.is_available():
    DESCRIPTIONz += "\n<p>⚠️ Running on CPU. This demo is unlikely to work on CPU; if it seems stuck or errors out, duplicate the Space onto a GPU (on ZeroGPU, keep `import spaces` and decorate the inference function with @spaces.GPU).</p>"
    # Optionally, add a placeholder UI or disable generation here.
else:
    USE_TORCH_COMPILE = False  # Plain booleans instead of 0, which is not a standard flag value.
    ENABLE_CPU_OFFLOAD = False

    # Pipe initialization happens only inside the CUDA check.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "SG161222/RealVisXL_V4.0_Lightning",
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
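
    # A minimal sketch of how the two flags above could be wired up (an assumption
    # added for illustration, not part of the original app; both flags are False,
    # so neither branch runs by default):
    if USE_TORCH_COMPILE:
        # Compiling the UNet can speed up repeated generations after a slow warm-up.
        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
    if ENABLE_CPU_OFFLOAD:
        # Offloading trades speed for VRAM; do not combine with pipe.to("cuda") below.
        pipe.enable_model_cpu_offload()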

LORA_OPTIONS = {
    "Realism (face/character)👦🏻": ("prithivMLmods/Canopus-Realism-LoRA", "Canopus-Realism-LoRA.safetensors", "rlms"),
    "Pixar (art/toons)🙀": ("prithivMLmods/Canopus-Pixar-Art", "Canopus-Pixar-Art.safetensors", "pixar"),
    "Photoshoot (camera/film)📸": ("prithivMLmods/Canopus-Photo-Shoot-Mini-LoRA", "Canopus-Photo-Shoot-Mini-LoRA.safetensors", "photo"),
    "Clothing (hoodies/pant/shirts)👔": ("prithivMLmods/Canopus-Clothing-Adp-LoRA", "Canopus-Dress-Clothing-LoRA.safetensors", "clth"),
    "Interior Architecture (house/hotel)🏠": ("prithivMLmods/Canopus-Interior-Architecture-0.1", "Canopus-Interior-Architecture-0.1δ.safetensors", "arch"),
    "Fashion Product (wearing/usable)👜": ("prithivMLmods/Canopus-Fashion-Product-Dilation", "Canopus-Fashion-Product-Dilation.safetensors", "fashion"),
    "Minimalistic Image (minimal/detailed)🏞️": ("prithivMLmods/Pegasi-Minimalist-Image-Style", "Pegasi-Minimalist-Image-Style.safetensors", "minimalist"),
    "Modern Clothing (trend/new)👕": ("prithivMLmods/Canopus-Modern-Clothing-Design", "Canopus-Modern-Clothing-Design.safetensors", "mdrnclth"),
    "Animaliea (farm/wild)🐫": ("prithivMLmods/Canopus-Animaliea-Artism", "Canopus-Animaliea-Artism.safetensors", "Animaliea"),
    "Liquid Wallpaper (minimal/illustration)🖼️": ("prithivMLmods/Canopus-Liquid-Wallpaper-Art", "Canopus-Liquid-Wallpaper-Minimalize-LoRA.safetensors", "liquid"),
    "Canes Cars (realistic/futurecars)🚗": ("prithivMLmods/Canes-Cars-Model-LoRA", "Canes-Cars-Model-LoRA.safetensors", "car"),
    "Pencil Art (characteristic/creative)✏️": ("prithivMLmods/Canopus-Pencil-Art-LoRA", "Canopus-Pencil-Art-LoRA.safetensors", "Pencil Art"),
    "Art Minimalistic (paint/semireal)🎨": ("prithivMLmods/Canopus-Art-Medium-LoRA", "Canopus-Art-Medium-LoRA.safetensors", "mdm"),
}
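
# Each entry maps a UI label to (Hub repo id, weight filename, adapter name).
# To offer another LoRA, append an entry of the same shape (a hypothetical
# example, not a real repo):
# LORA_OPTIONS["My Style (demo)"] = ("your-user/your-lora-repo", "your-lora.safetensors", "mystyle")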

# Load the LoRAs only if the pipeline was initialized.
if pipe is not None:
    for model_name, weight_name, adapter_name in LORA_OPTIONS.values():
        try:
            pipe.load_lora_weights(model_name, weight_name=weight_name, adapter_name=adapter_name)
            print(f"Loaded LoRA: {adapter_name}")
        except Exception as e:
            print(f"Warning: Could not load LoRA {adapter_name} from {model_name}. Error: {e}")
    pipe.to("cuda")
    print("Pipeline and LoRAs loaded to CUDA.")
else:
    print("Pipeline not initialized (likely no CUDA available).")
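
# If the app ever pinned a single adapter instead of switching per request, it
# could be fused into the base weights for a small speed win (a sketch, not used
# here because adapters are switched in generate()):
#   pipe.set_adapters("rlms")
#   pipe.fuse_lora(lora_scale=0.65)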

style_list = [
    {
        "name": "3840 x 2160",
        "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
    },
    {
        "name": "2560 x 1440",
        "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
    },
    {
        "name": "HD+",
        "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
    },
    {
        "name": "Style Zero",
        "prompt": "{prompt}",
        "negative_prompt": "",
    },
]

styles = {style["name"]: (style["prompt"], style["negative_prompt"]) for style in style_list}
DEFAULT_STYLE_NAME = "3840 x 2160"
STYLE_NAMES = list(styles.keys())

def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])  # .get() guards against unknown style names.
    if not negative:
        negative = ""
    return p.replace("{prompt}", positive), (n + " " + negative).strip()
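
# For example, with the helper above:
#   apply_style("Style Zero", "a red fox")     -> ("a red fox", "")
#   apply_style("HD+", "a red fox", "blurry")  -> ("hyper-realistic 2K image of a red fox. ...",
#                                                  "cartoonish, low resolution, ... ugly blurry")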

@spaces.GPU  # Request a ZeroGPU slot per call; this is why `spaces` is imported above.
def generate(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
    style_name: str = DEFAULT_STYLE_NAME,
    lora_model: str = "Realism (face/character)👦🏻",
    progress=gr.Progress(track_tqdm=True),
):
    if pipe is None:
        raise gr.Error("Pipeline not initialized. Check if CUDA is available and drivers are installed.")

    seed = int(randomize_seed_fn(seed, randomize_seed))

    # apply_style folds the user's negative prompt (when enabled) into the style's
    # own negative prompt, so the combined string can be used directly.
    positive_prompt, effective_negative_prompt = apply_style(
        style_name, prompt, negative_prompt if use_negative_prompt else ""
    )

    # Validate the LoRA selection; fall back to the first option if it is unknown.
    if lora_model not in LORA_OPTIONS:
        print(f"Warning: Invalid LoRA selection '{lora_model}'. Falling back to the first available option.")
        lora_model = next(iter(LORA_OPTIONS))
    model_name, weight_name, adapter_name = LORA_OPTIONS[lora_model]

    try:
        print(f"Setting adapter: {adapter_name}")
        pipe.set_adapters(adapter_name)

        # LoRA influence is passed via cross_attention_kwargs; 0.65 is the value
        # the app has always hardcoded.
        lora_scale = 0.65

        print(f"Generating with prompt: '{positive_prompt}'")
        print(f"Negative prompt: '{effective_negative_prompt}'")
        print(f"Seed: {seed}, W: {width}, H: {height}, Scale: {guidance_scale}, Steps: 20")

        images = pipe(
            prompt=positive_prompt,
            negative_prompt=effective_negative_prompt,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            num_inference_steps=20,  # Lightning models are distilled for few steps.
            num_images_per_prompt=1,
            generator=torch.Generator("cuda").manual_seed(seed),  # Reproducibility.
            cross_attention_kwargs={"scale": lora_scale},
            output_type="pil",
        ).images

        image_paths = [save_image(img) for img in images]
        print(f"Generated {len(image_paths)} image(s).")
        return image_paths, seed
    except Exception as e:
        import traceback

        traceback.print_exc()
        # Surface the failure in the UI as a Gradio error.
        raise gr.Error(f"Generation failed: {e}") from e

examples = [
    ["Realism: Man in the style of dark beige and brown, uhd image, youthful protagonists, nonrepresentational"],
    ["Pixar: A young man with light brown wavy hair and light brown eyes sitting in an armchair and looking directly at the camera, pixar style, disney pixar, office background, ultra detailed, 1 man"],
    ["Hoodie: Front view, capture a urban style, Superman Hoodie, technical materials, fabric small point label on text Blue theory, the design is minimal, with a raised collar, fabric is a Light yellow, low angle to capture the Hoodies form and detailing, f/5.6 to focus on the hoodies craftsmanship, solid grey background, studio light setting, with batman logo in the chest region of the t-shirt"],
]

css = '''
.gradio-container{max-width: 900px !important; margin: auto;}
h1{text-align:center}
#gallery { min-height: 400px; }
footer { display: none !important; visibility: hidden !important; }
'''

def load_predefined_images():
    predefined_images = []
    asset_dir = "assets"
    if os.path.exists(asset_dir):
        # A list (not a set) keeps the extension preference deterministic.
        valid_extensions = [".png", ".jpg", ".jpeg", ".webp"]
        try:
            for i in range(1, 10):  # Try loading assets/1.* through assets/9.*
                for ext in valid_extensions:
                    img_path = os.path.join(asset_dir, f"{i}{ext}")
                    if os.path.exists(img_path):
                        predefined_images.append(img_path)
                        break  # Found an image for this number; move to the next.
        except Exception as e:
            print(f"Error loading predefined images: {e}")
    if not predefined_images:
        print("No predefined images found in assets folder (e.g., assets/1.png, assets/2.jpg).")
    return predefined_images

# --- Gradio UI Definition ---
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    gr.HTML(title)
    if DESCRIPTIONz:
        gr.HTML(DESCRIPTIONz)  # Shows the CPU warning, if any.

    # Define the output gallery first so the event hooks below can reference it.
    result_gallery = gr.Gallery(
        label="Generated Images",
        show_label=False,
        elem_id="gallery",  # For CSS styling.
        columns=1,
        height="auto",
    )
    # The seed actually used is written straight back to the seed slider below,
    # so no separate state component is needed for it.

    with gr.Row():
        prompt = gr.Textbox(
            label="Prompt",
            show_label=False,
            max_lines=2,
            placeholder="Enter your prompt here...",
            container=False,
            scale=7,  # Give the prompt most of the row.
        )
        run_button = gr.Button("Generate", scale=1, variant="primary")

    with gr.Row():
        model_choice = gr.Dropdown(
            label="LoRA Selection",
            choices=list(LORA_OPTIONS.keys()),
            value="Realism (face/character)👦🏻",  # Default selection.
            scale=3,
        )
        style_selection = gr.Radio(
            label="Quality Style",
            choices=STYLE_NAMES,
            value=DEFAULT_STYLE_NAME,
            interactive=True,
            container=True,
            scale=2,
        )

    with gr.Accordion("Advanced options", open=False):
        with gr.Row():
            use_negative_prompt = gr.Checkbox(label="Use Negative Prompt", value=True, scale=1)
            randomize_seed = gr.Checkbox(label="Randomize Seed", value=True, scale=1)
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,  # Initial value; updated with each generation.
                visible=True,
                scale=3,
            )
        negative_prompt = gr.Textbox(
            label="Negative Prompt",
            lines=2,
            max_lines=4,
            value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
            placeholder="Enter things to avoid...",
            visible=True,  # Toggled by the use_negative_prompt checkbox.
        )

        with gr.Row():
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=1536,  # A practical ceiling for typical SDXL usage.
                step=64,  # Step by 64 for common resolutions.
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=1536,
                step=64,
                value=1024,
            )
        guidance_scale = gr.Slider(
            label="Guidance Scale (CFG)",
            minimum=1.0,
            maximum=10.0,  # Lightning models typically want low CFG.
            step=0.1,
            value=3.0,
        )

    # --- Event Listeners ---
    # Toggle negative-prompt visibility with the checkbox.
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )

    # Optional: lock the seed slider while "Randomize Seed" is on.
    # def toggle_seed_interactivity(randomize):
    #     return gr.update(interactive=not randomize)
    # randomize_seed.change(
    #     fn=toggle_seed_interactivity,
    #     inputs=randomize_seed,
    #     outputs=seed,
    #     api_name=False,
    # )

    # --- Image Generation Trigger ---
    inputs = [
        prompt,
        negative_prompt,
        use_negative_prompt,
        seed,
        width,
        height,
        guidance_scale,
        randomize_seed,
        style_selection,
        model_choice,
    ]
    outputs = [
        result_gallery,  # The gallery that displays the generated images.
        seed,  # The seed slider, updated with the seed that was actually used.
    ]

    # Run generate on button click or on prompt submit.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=generate,
        inputs=inputs,
        outputs=outputs,
        api_name="run",  # Exposes the endpoint as /run in the Space API.
    )
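
    # A sketch of calling the /run endpoint remotely with gradio_client (added for
    # illustration; "user/space-name" is a placeholder, and the positional arguments
    # mirror the `inputs` list above):
    #
    #   from gradio_client import Client
    #   client = Client("user/space-name")
    #   images, used_seed = client.predict(
    #       "a portrait photo",  # prompt
    #       "", False, 0,        # negative_prompt, use_negative_prompt, seed
    #       1024, 1024, 3.0,     # width, height, guidance_scale
    #       True,                # randomize_seed
    #       "3840 x 2160",       # style_name
    #       "Realism (face/character)👦🏻",  # lora_model
    #       api_name="/run",
    #   )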

    # --- Examples ---
    gr.Examples(
        examples=examples,
        inputs=[prompt],  # Only the prompt is needed for the examples.
        outputs=[result_gallery, seed],  # Same outputs as the main trigger.
        fn=generate,
        cache_examples=os.getenv("CACHE_EXAMPLES", "False").lower() == "true",  # Opt-in caching on Spaces.
    )

    # --- Predefined Image Gallery (Static) ---
    with gr.Column():
        gr.Markdown("### Example Gallery (Predefined)")
        try:
            predefined_gallery_images = load_predefined_images()
            if predefined_gallery_images:
                predefined_gallery = gr.Gallery(
                    label="Predefined Images",
                    value=predefined_gallery_images,
                    columns=3,
                    show_label=False,
                )
            else:
                gr.Markdown("_(No predefined images found in the 'assets' folder)_")
        except Exception as e:
            gr.Markdown(f"_Error loading predefined gallery: {e}_")

# --- Launch the App ---
if __name__ == "__main__":
    demo.queue(max_size=20).launch(debug=True)  # debug=True prints more detailed logs.