import os
import gc
import random
import gradio as gr
import numpy as np
import torch
import json
import spaces
import config
import utils
import logging
from PIL import Image, PngImagePlugin
from datetime import datetime
from diffusers.models import AutoencoderKL
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
# ... (initial setup omitted: logger, MODEL, DESCRIPTION, MIN_IMAGE_SIZE, MAX_IMAGE_SIZE,
#      CACHE_EXAMPLES, IS_COLAB and related constants)
# ... (helper functions omitted: load_pipeline, parse_json_parameters, apply_json_parameters,
#      generate, get_random_prompt)
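
# The helpers listed above are not shown in this file. The two functions below are a
# minimal illustrative sketch (an assumption, not the original implementations) of what
# load_pipeline and get_random_prompt might look like; they use hypothetical names so
# they do not shadow the real definitions and are not called by the app itself.
def _load_pipeline_sketch(model_name):
    """Illustrative only: load an SDXL pipeline onto the GPU with diffusers."""
    pipeline = StableDiffusionXLPipeline.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    return pipeline.to("cuda")


def _get_random_prompt_sketch():
    """Illustrative only: pick a random example prompt from config.examples."""
    example = random.choice(config.examples)
    # Example rows may be lists (one column per input); keep only the prompt column.
    return example[0] if isinstance(example, (list, tuple)) else example
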
if torch.cuda.is_available():
pipe = load_pipeline(MODEL)
logger.info("Loaded on Device!")
else:
pipe = None
# Define the JavaScript code as a string
js_code = """
<script>
document.addEventListener('DOMContentLoaded', (event) => {
const historyDropdown = document.getElementById('history-dropdown');
const resultGallery = document.querySelector('.gallery');
if (historyDropdown && resultGallery) {
const observer = new MutationObserver((mutations) => {
mutations.forEach((mutation) => {
if (mutation.type === 'childList' && mutation.addedNodes.length > 0) {
const newImage = mutation.addedNodes[0];
if (newImage.tagName === 'IMG') {
updateHistory(newImage.src);
}
}
});
});
observer.observe(resultGallery, { childList: true });
function updateHistory(imageSrc) {
const prompt = document.querySelector('#prompt textarea').value;
const option = document.createElement('option');
option.value = prompt;
option.textContent = prompt;
option.setAttribute('data-image', imageSrc);
historyDropdown.insertBefore(option, historyDropdown.firstChild);
if (historyDropdown.children.length > 10) {
historyDropdown.removeChild(historyDropdown.lastChild);
}
}
historyDropdown.addEventListener('change', (event) => {
const selectedOption = event.target.selectedOptions[0];
const imageSrc = selectedOption.getAttribute('data-image');
if (imageSrc) {
const img = document.createElement('img');
img.src = imageSrc;
resultGallery.innerHTML = '';
resultGallery.appendChild(img);
}
});
}
});
</script>
"""
with gr.Blocks(css="style.css") as demo:
gr.HTML(js_code) # Add the JavaScript code to the interface
title = gr.HTML(
f"""<h1><span>{DESCRIPTION}</span></h1>""",
elem_id="title",
)
gr.Markdown(
f"""Gradio demo for [Pony Diffusion V6](https://civitai.com/models/257749/pony-diffusion-v6-xl/)""",
elem_id="subtitle",
)
gr.DuplicateButton(
value="Duplicate Space for private use",
elem_id="duplicate-button",
visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
)
with gr.Group():
with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=5,
                placeholder="Enter your prompt",
                container=False,
                elem_id="prompt",
            )
run_button = gr.Button(
"Generate",
variant="primary",
scale=0
)
result = gr.Gallery(
label="Result",
columns=1,
preview=True,
show_label=False
)
with gr.Accordion(label="Advanced Settings", open=False):
negative_prompt = gr.Text(
label="Negative Prompt",
max_lines=5,
placeholder="Enter a negative prompt",
value=""
)
aspect_ratio_selector = gr.Radio(
label="Aspect Ratio",
choices=config.aspect_ratios,
value="1024 x 1024",
container=True,
)
with gr.Group(visible=False) as custom_resolution:
with gr.Row():
custom_width = gr.Slider(
label="Width",
minimum=MIN_IMAGE_SIZE,
maximum=MAX_IMAGE_SIZE,
step=8,
value=1024,
)
custom_height = gr.Slider(
label="Height",
minimum=MIN_IMAGE_SIZE,
maximum=MAX_IMAGE_SIZE,
step=8,
value=1024,
)
use_upscaler = gr.Checkbox(label="Use Upscaler", value=False)
with gr.Row() as upscaler_row:
upscaler_strength = gr.Slider(
label="Strength",
minimum=0,
maximum=1,
step=0.05,
value=0.55,
visible=False,
)
upscale_by = gr.Slider(
label="Upscale by",
minimum=1,
maximum=1.5,
step=0.1,
value=1.5,
visible=False,
)
sampler = gr.Dropdown(
label="Sampler",
choices=config.sampler_list,
interactive=True,
value="DPM++ 2M SDE Karras",
)
with gr.Row():
seed = gr.Slider(
label="Seed", minimum=0, maximum=utils.MAX_SEED, step=1, value=0
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Group():
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=1,
maximum=12,
step=0.1,
value=7.0,
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=28,
)
with gr.Accordion(label="JSON Parameters", open=False):
json_input = gr.TextArea(label="Input JSON parameters")
apply_json_button = gr.Button("Apply JSON Parameters")
with gr.Row():
clear_button = gr.Button("Clear All")
random_prompt_button = gr.Button("Random Prompt")
history_dropdown = gr.Dropdown(label="Generation History", choices=[], interactive=True, elem_id="history-dropdown")
with gr.Accordion(label="Generation Parameters", open=False):
gr_metadata = gr.JSON(label="Metadata", show_label=False)
    gr.Examples(
        examples=config.examples,
        inputs=prompt,
        outputs=[result, gr_metadata, history_dropdown],
        fn=lambda *args, **kwargs: generate(*args, use_upscaler=True, **kwargs),
        cache_examples=CACHE_EXAMPLES,
    )
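
    # Wire up the UI events: visibility toggles first, then the generation triggers.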
use_upscaler.change(
fn=lambda x: [gr.update(visible=x), gr.update(visible=x)],
inputs=use_upscaler,
outputs=[upscaler_strength, upscale_by],
queue=False,
api_name=False,
)
aspect_ratio_selector.change(
fn=lambda x: gr.update(visible=x == "Custom"),
inputs=aspect_ratio_selector,
outputs=custom_resolution,
queue=False,
api_name=False,
)
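
    # Shared argument list passed to generate() by each of the triggers below.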
inputs = [
prompt,
negative_prompt,
seed,
custom_width,
custom_height,
guidance_scale,
num_inference_steps,
sampler,
aspect_ratio_selector,
use_upscaler,
upscaler_strength,
upscale_by,
]
prompt.submit(
fn=utils.randomize_seed_fn,
inputs=[seed, randomize_seed],
outputs=seed,
queue=False,
api_name=False,
).then(
fn=generate,
inputs=inputs,
outputs=[result, gr_metadata, history_dropdown],
api_name="run",
)
negative_prompt.submit(
fn=utils.randomize_seed_fn,
inputs=[seed, randomize_seed],
outputs=seed,
queue=False,
api_name=False,
).then(
fn=generate,
inputs=inputs,
outputs=[result, gr_metadata, history_dropdown],
api_name=False,
)
run_button.click(
fn=utils.randomize_seed_fn,
inputs=[seed, randomize_seed],
outputs=seed,
queue=False,
api_name=False,
).then(
fn=generate,
inputs=inputs,
outputs=[result, gr_metadata, history_dropdown],
api_name=False,
)
apply_json_button.click(
fn=apply_json_parameters,
inputs=json_input,
outputs=[prompt, negative_prompt, seed, custom_width, custom_height,
guidance_scale, num_inference_steps, sampler,
aspect_ratio_selector, use_upscaler, upscaler_strength, upscale_by]
)
clear_button.click(
fn=lambda: (gr.update(value=""), gr.update(value=""), gr.update(value=0),
gr.update(value=1024), gr.update(value=1024),
                    gr.update(value=7.0), gr.update(value=28),
gr.update(value="DPM++ 2M SDE Karras"),
gr.update(value="1024 x 1024"), gr.update(value=False),
gr.update(value=0.55), gr.update(value=1.5)),
inputs=[],
outputs=[prompt, negative_prompt, seed, custom_width, custom_height,
guidance_scale, num_inference_steps, sampler,
aspect_ratio_selector, use_upscaler, upscaler_strength, upscale_by]
)
random_prompt_button.click(
fn=get_random_prompt,
inputs=[],
outputs=prompt
)
history_dropdown.change(
fn=lambda x: gr.update(value=x),
inputs=history_dropdown,
outputs=prompt
)
demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)