import gc
import json
import logging
import os
import random

import gradio as gr
import numpy as np
import spaces
import torch
from PIL import Image, PngImagePlugin
from datetime import datetime
from diffusers.models import AutoencoderKL
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline

import config
import utils

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

DESCRIPTION = "PonyDiffusion V6 XL"
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
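
# The body of generate() below is elided. As a minimal, hypothetical sketch of
# how generated images might be persisted together with their parameters (one
# plausible use of the PngImagePlugin, json, and datetime imports above), the
# helper below is illustrative only and is not part of the original Space:
def save_image_with_metadata(image: Image.Image, metadata: dict, output_dir: str) -> str:
    """Save `image` as a PNG with `metadata` embedded as a text chunk."""
    os.makedirs(output_dir, exist_ok=True)
    info = PngImagePlugin.PngInfo()
    info.add_text("parameters", json.dumps(metadata))
    path = os.path.join(output_dir, f"{datetime.now():%Y%m%d-%H%M%S}.png")
    image.save(path, pnginfo=info)
    return path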

IS_COLAB = utils.is_google_colab() or os.getenv("IS_COLAB") == "1"
HF_TOKEN = os.getenv("HF_TOKEN")
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
MIN_IMAGE_SIZE = int(os.getenv("MIN_IMAGE_SIZE", "512"))
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
OUTPUT_DIR = os.getenv("OUTPUT_DIR", "./outputs")

MODEL = os.getenv(
    "MODEL",
    "https://huggingface.co/AstraliteHeart/pony-diffusion-v6/blob/main/v6.safetensors",
)

torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load pipeline function remains unchanged


def parse_json_parameters(json_str):
    """Parse a JSON string of generation parameters; return None if invalid."""
    try:
        return json.loads(json_str)
    except json.JSONDecodeError:
        return None


def apply_json_parameters(json_str):
    """Map a JSON payload onto the UI controls.

    Expected keys mirror the defaults below, e.g. (illustrative values):
    {"prompt": "...", "negative_prompt": "...", "seed": 0, "width": 1024,
     "height": 1024, "guidance_scale": 7.0, "num_inference_steps": 30,
     "sampler": "DPM++ 2M SDE Karras", "aspect_ratio": "1024 x 1024",
     "use_upscaler": false, "upscaler_strength": 0.55, "upscale_by": 1.5}
    """
    params = parse_json_parameters(json_str)
    if params:
        return (
            params.get("prompt", ""),
            params.get("negative_prompt", ""),
            params.get("seed", 0),
            params.get("width", 1024),
            params.get("height", 1024),
            params.get("guidance_scale", 7.0),
            params.get("num_inference_steps", 30),
            params.get("sampler", "DPM++ 2M SDE Karras"),
            params.get("aspect_ratio", "1024 x 1024"),
            params.get("use_upscaler", False),
            params.get("upscaler_strength", 0.55),
            params.get("upscale_by", 1.5),
        )
    return [gr.update()] * 12


# Generation history kept in a module-level list: gr.get_state/gr.set_state
# are not part of the Gradio API, so the last 10 generations are stored here.
generation_history = []


def generate(
    prompt: str,
    negative_prompt: str = "",
    seed: int = 0,
    custom_width: int = 1024,
    custom_height: int = 1024,
    guidance_scale: float = 7.0,
    num_inference_steps: int = 30,
    sampler: str = "DPM++ 2M SDE Karras",
    aspect_ratio_selector: str = "1024 x 1024",
    use_upscaler: bool = False,
    upscaler_strength: float = 0.55,
    upscale_by: float = 1.5,
    progress=gr.Progress(track_tqdm=True),
):
    # Existing generate function code...
    # (`images` and `metadata` are produced by the elided code above.)

    # Update history after generation, keeping only the last 10 entries.
    generation_history.insert(
        0, {"prompt": prompt, "image": images[0], "metadata": metadata}
    )
    del generation_history[10:]
    return images, metadata, gr.update(
        choices=[h["prompt"] for h in generation_history]
    )


def get_random_prompt():
    return random.choice(config.examples)


with gr.Blocks(css="style.css") as demo:
    # Existing UI elements...
    with gr.Accordion(label="JSON Parameters", open=False):
        json_input = gr.TextArea(label="Input JSON parameters")
        apply_json_button = gr.Button("Apply JSON Parameters")

    with gr.Row():
        clear_button = gr.Button("Clear All")
        random_prompt_button = gr.Button("Random Prompt")

    history_dropdown = gr.Dropdown(
        label="Generation History", choices=[], interactive=True
    )

    # Connect components
    apply_json_button.click(
        fn=apply_json_parameters,
        inputs=json_input,
        outputs=[
            prompt, negative_prompt, seed, custom_width, custom_height,
            guidance_scale, num_inference_steps, sampler,
            aspect_ratio_selector, use_upscaler, upscaler_strength, upscale_by,
        ],
    )

    clear_button.click(
        fn=lambda: (
            gr.update(value=""),
            gr.update(value=""),
            gr.update(value=0),
            gr.update(value=1024),
            gr.update(value=1024),
            gr.update(value=7.0),
            gr.update(value=30),
            gr.update(value="DPM++ 2M SDE Karras"),
            gr.update(value="1024 x 1024"),
            gr.update(value=False),
            gr.update(value=0.55),
            gr.update(value=1.5),
        ),
        inputs=[],
        outputs=[
            prompt, negative_prompt, seed, custom_width, custom_height,
            guidance_scale, num_inference_steps, sampler,
            aspect_ratio_selector, use_upscaler, upscaler_strength, upscale_by,
        ],
    )

    random_prompt_button.click(fn=get_random_prompt, inputs=[], outputs=prompt)

    history_dropdown.change(
        fn=lambda x: gr.update(value=x),
        inputs=history_dropdown,
        outputs=prompt,
    )

    # Existing event handlers...

demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)
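
# The pipeline loader itself is unchanged and elided above ("Load pipeline
# function remains unchanged"). As a minimal sketch, one plausible way to load
# a single-file SDXL checkpoint like MODEL with the imports at the top of this
# file is shown below; it is an assumption, not the Space's actual loader, and
# is never called by the demo. The fp16-safe VAE repo is likewise assumed.
def load_pipeline_sketch(model: str = MODEL) -> StableDiffusionXLPipeline:
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
    )
    pipe = StableDiffusionXLPipeline.from_single_file(
        model, vae=vae, torch_dtype=torch.float16, use_safetensors=True
    )
    return pipe.to(device)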