import gradio as gr
import random
import spaces
import torch
import uuid

from diffusers import StableDiffusionXLPipeline
from diffusers.models import AutoencoderKL

DEVICE = "auto"
if DEVICE == "auto":
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

print(f"[SYSTEM] | Using compute device: {DEVICE}.")

MAX_SEED = 9007199254740991  # 2**53 - 1, the largest integer JavaScript can represent exactly
DEFAULT_INPUT = ""
DEFAULT_NEGATIVE_INPUT = "EasyNegative, (bad), [abstract], deformed, distorted, disfigured, disconnected, disgusting, displeasing, mutation, mutated, blur, blurry, fewer, extra, missing, unfinished, scribble, lowres, low quality, jpeg artifacts, chromatic aberration, extra digits, artistic error, text, error, username, scan, signature, watermark, ugly, amputation, limb, limbs, leg, legs, foot, feet, toe, toes, arm, arms, hand, hands, finger, fingers, head, heads, exposed, explicit, porn, nude, nudity, naked, nsfw"
DEFAULT_MODEL = "Default"
DEFAULT_HEIGHT = 1024
DEFAULT_WIDTH = 1024

css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

# Base pipeline: sdxl-flash with a DALL-E-3-style LoRA blended in at 0.7 strength.
# Note: StableDiffusionXLPipeline has no controlnet component; ControlNet guidance
# would require StableDiffusionXLControlNetPipeline plus a conditioning image.
repo_default = StableDiffusionXLPipeline.from_pretrained("sd-community/sdxl-flash", vae=vae, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
repo_default.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
repo_default.set_adapters(["base"], adapter_weights=[0.7])

repo_customs = {
    "Default": repo_default,
    "Realistic": StableDiffusionXLPipeline.from_pretrained("stablediffusionapi/NightVision_XL", vae=vae, torch_dtype=torch.float16, use_safetensors=False, add_watermarker=False),
    "Anime": StableDiffusionXLPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", vae=vae, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
    "Pixel": StableDiffusionXLPipeline.from_pretrained("PublicPrompts/All-In-One-Pixel-Model", vae=vae, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
}
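
# Keeping four SDXL pipelines resident is memory-hungry. A sketch for constrained
# local runs (an assumption; on ZeroGPU the weights stay in CPU RAM until
# generate() moves one pipeline to CUDA): swap the repo.to(DEVICE) call for
# diffusers' offload helper, which pages submodules onto the GPU on demand:
#
#     repo_default.enable_model_cpu_offload()  # needs the `accelerate` package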

def save_image(img, seed):
    # Write to the working directory under a collision-proof name.
    name = f"{seed}-{uuid.uuid4()}.png"
    img.save(name)
    return name

def get_seed(seed):
    # A digit-only string is used as the seed; anything else falls back to random.
    seed = (seed or "").strip()
    if seed.isdigit():
        return int(seed)
    return random.randint(0, MAX_SEED)
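
# For example: get_seed("42") returns 42, while get_seed(""), get_seed(None),
# and get_seed("4.2") all return a random seed in [0, MAX_SEED].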

@spaces.GPU(duration=30)
def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATIVE_INPUT, model=DEFAULT_MODEL, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH, steps=1, guidance=0, number=1, seed=None):
    repo = repo_customs[model or "Default"]
    filter_input = filter_input or ""
    negative_input = negative_input or DEFAULT_NEGATIVE_INPUT
    seed = get_seed(seed)

    print(input, filter_input, negative_input, model, height, width, steps, guidance, number, seed)

    # Per-model (steps, guidance) defaults, applied when the UI passes 0 or the
    # -1 "auto" sentinel.
    defaults = {
        "Realistic": (30, 7),
        "Anime": (16, 7),
        "Pixel": (8, 3),
    }
    default_steps, default_guidance = defaults.get(model, (16, 3))
    if not steps or steps < 0:
        steps = default_steps
    if not guidance or guidance < 0:
        guidance = default_guidance

    repo.to(DEVICE)

    parameters = {
        "prompt": input,
        # Prepend the filter terms, if any, to the negative prompt.
        "negative_prompt": ", ".join(part for part in (filter_input, negative_input) if part),
        "height": height,
        "width": width,
        "num_inference_steps": steps,
        "guidance_scale": guidance,
        "num_images_per_prompt": number,
        "cross_attention_kwargs": {"scale": 1},  # LoRA strength; only "Default" has adapters loaded
        "generator": torch.Generator().manual_seed(seed),
        "output_type": "pil",
    }
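
    # A CPU torch.Generator seeded once per call makes results reproducible for a
    # given seed; diffusers draws the initial latents from it and moves them to
    # the pipeline's device.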
    images = repo(**parameters).images
    image_paths = [save_image(img, seed) for img in images]
    print(image_paths)
    return image_paths
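
# Illustrative call (assumed values; commented out because it would trigger a
# real generation at import time):
#
#     paths = generate(input="a watercolor fox", model="Anime", seed="42")
#     print(paths)  # e.g. ["42-<uuid4>.png"]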

def cloud():
    print("[CLOUD] | Space maintained.")

with gr.Blocks(css=css) as main:
    with gr.Column():
        gr.Markdown("🪄 Generate high-quality images in any style in 10 to 20 seconds.")

    with gr.Column():
        input = gr.Textbox(lines=1, value=DEFAULT_INPUT, label="Input")
        filter_input = gr.Textbox(lines=1, value="", label="Input Filter")
        negative_input = gr.Textbox(lines=1, value=DEFAULT_NEGATIVE_INPUT, label="Input Negative")
        model = gr.Dropdown(label="Models", choices=["Default", "Realistic", "Anime", "Pixel"], value="Default")
        height = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_HEIGHT, label="Height")
        width = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_WIDTH, label="Width")
        steps = gr.Slider(minimum=-1, maximum=100, step=1, value=-1, label="Steps (-1 for the model default)")
        guidance = gr.Slider(minimum=-1, maximum=100, step=0.001, value=-1, label="Guidance (-1 for the model default)")
        number = gr.Slider(minimum=1, maximum=4, step=1, value=1, label="Number")
        seed = gr.Textbox(lines=1, value="", label="Seed (Blank for random)")
        submit = gr.Button("▶")
        maintain = gr.Button("☁️")

    with gr.Column():
        images = gr.Gallery(columns=1, label="Image")

    submit.click(generate, inputs=[input, filter_input, negative_input, model, height, width, steps, guidance, number, seed], outputs=[images], queue=False)
    maintain.click(cloud, inputs=[], outputs=[], queue=False)

main.launch(show_api=True)