import gradio as gr
import random
import spaces
import torch
import uuid
import os

from transformers import pipeline
from diffusers import StableDiffusionXLPipeline, ControlNetModel
from diffusers.models import AutoencoderKL
from PIL import Image

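# Select the compute device: use CUDA when available, otherwise fall back to the CPU.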
DEVICE = "auto" |
|
if DEVICE == "auto": |
|
DEVICE = "cuda" if torch.cuda.is_available() else "cpu" |
|
print(f"[SYSTEM] | Using {DEVICE} type compute device.") |
|
|
|
|
|
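# 2**53 - 1, JavaScript's Number.MAX_SAFE_INTEGER; presumably chosen so seed values stay exact in the browser frontend.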
MAX_SEED = 9007199254740991
DEFAULT_INPUT = ""
DEFAULT_NEGATIVE_INPUT = "EasyNegative, (bad), [abstract], deformed, distorted, disfigured, disconnected, disgusting, displeasing, mutation, mutated, blur, blurry, fewer, extra, missing, unfinished, scribble, lowres, low quality, jpeg artifacts, chromatic aberration, extra digits, artistic error, text, error, username, scan, signature, watermark, ugly, amputation, limb, limbs, leg, legs, foot, feet, toe, toes, arm, arms, hand, hands, finger, fingers, head, heads, exposed, explicit, porn, nude, nudity, naked, nsfw"
DEFAULT_MODEL = "Default"
DEFAULT_HEIGHT = 1024
DEFAULT_WIDTH = 1024

css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''

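# Load the fp16-fixed SDXL VAE, a hand-focused ControlNet, and the "sdxl-flash" base pipeline, then attach the "dalle-3-xl-v2" LoRA at weight 0.7.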
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
controlnet = ControlNetModel.from_pretrained("MakiPan/controlnet-encoded-hands-130k", torch_dtype=torch.float16)

repo_default = StableDiffusionXLPipeline.from_pretrained("sd-community/sdxl-flash", vae=vae, controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
repo_default.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
repo_default.set_adapters(["base"], adapter_weights=[0.7])

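# Style presets selectable in the UI; only "Default" is loaded, the remaining entries are placeholders.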
repo_customs = {
    "Default": repo_default,
    "Realistic": None,
    "Anime": None,
    "Pixel": None,
    "Large": None,
}

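# Save a generated image under a unique, seed-prefixed filename and return the path.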
def save_image(img, seed):
    name = f"{seed}-{uuid.uuid4()}.png"
    img.save(name)
    return name

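# Interpret the seed textbox: a plain non-negative integer is used as-is, anything else (including blank) yields a random seed.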
def get_seed(seed):
    seed = (seed or "").strip()
    if seed.isdigit():
        return int(seed)
    else:
        return random.randint(0, MAX_SEED)

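# Image generation entry point; @spaces.GPU requests a ZeroGPU slice for up to 60 seconds per call.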
@spaces.GPU(duration=60)
def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATIVE_INPUT, model=DEFAULT_MODEL, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH, steps=1, guidance=0, number=1, seed=None):

    repo = repo_customs[model or "Default"]
    filter_input = filter_input or ""
    negative_input = negative_input or DEFAULT_NEGATIVE_INPUT
    steps_set = steps
    guidance_set = guidance
    seed = get_seed(seed)

    print(input, filter_input, negative_input, model, height, width, steps, guidance, number, seed)

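    # Per-style defaults for inference steps and guidance scale.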
    if model == "Realistic":
        steps_set = 35
        guidance_set = 7
    elif model == "Anime":
        steps_set = 35
        guidance_set = 7
    elif model == "Pixel":
        steps_set = 15
        guidance_set = 1.5
    elif model == "Large":
        steps_set = 20
        guidance_set = 7
    else:
        steps_set = 20
        guidance_set = 3

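    # A zero or negative slider value means "use the per-style default".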
    if not steps or steps < 0:
        steps = steps_set
    if not guidance or guidance < 0:
        guidance = guidance_set

    print(steps, guidance)

    repo.to(DEVICE)

    parameters = {
        "prompt": input,
        # Prepend any moderation filter terms to the negative prompt.
        "negative_prompt": f"{filter_input}, {negative_input}" if filter_input else negative_input,
        "height": height,
        "width": width,
        "num_inference_steps": steps,
        "guidance_scale": guidance,
        "num_images_per_prompt": number,
        "controlnet_conditioning_scale": 1,
        "cross_attention_kwargs": {"scale": 1},
        "generator": torch.Generator().manual_seed(seed),
        "use_resolution_binning": True,
        "output_type": "pil",
    }

    images = repo(**parameters).images
    image_paths = [save_image(img, seed) for img in images]

    print(image_paths)

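    # Screen the first generated image with an NSFW classifier; its label scores feed the UI label component.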
    classifier = pipeline("image-classification", model="Falconsai/nsfw_image_detection")(Image.open(image_paths[0]))

    print(classifier)

    return image_paths, {prediction["label"]: prediction["score"] for prediction in classifier}

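# Callback for the ☁️ button; simply logs a maintenance message.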
def cloud():
    print("[CLOUD] | Space maintained.")

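# Gradio UI: prompt and sampler controls on top, gallery and NSFW label below, wired to generate() and cloud().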
with gr.Blocks(css=css) as main:
    with gr.Column():
        gr.Markdown("🪄 Generate high-quality images in any style within 10 to 20 seconds.")

    with gr.Column():
        input = gr.Textbox(lines=1, value=DEFAULT_INPUT, label="Input")
        filter_input = gr.Textbox(lines=1, value="", label="Input Filter")
        negative_input = gr.Textbox(lines=1, value=DEFAULT_NEGATIVE_INPUT, label="Input Negative")
        model = gr.Dropdown(label="Models", choices=list(repo_customs.keys()), value="Default")
        height = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_HEIGHT, label="Height")
        width = gr.Slider(minimum=1, maximum=2160, step=1, value=DEFAULT_WIDTH, label="Width")
        steps = gr.Slider(minimum=-1, maximum=100, step=1, value=-1, label="Steps")
        guidance = gr.Slider(minimum=-1, maximum=100, step=0.001, value=-1, label="Guidance")
        number = gr.Slider(minimum=1, maximum=4, step=1, value=1, label="Number")
        seed = gr.Textbox(lines=1, value="", label="Seed (Blank for random)")
        submit = gr.Button("▶")
        maintain = gr.Button("☁️")

    with gr.Column():
        images = gr.Gallery(columns=1, label="Image")
        classifier = gr.Label()

    submit.click(generate, inputs=[input, filter_input, negative_input, model, height, width, steps, guidance, number, seed], outputs=[images, classifier], queue=False)
    maintain.click(cloud, inputs=[], outputs=[], queue=False)

main.launch(show_api=True)