# Imports
import gradio as gr
import requests
import random
import spaces
import torch
import json
import uuid
import os

from diffusers import StableDiffusionXLPipeline, StableDiffusion3Pipeline
from PIL import Image
# Pre-Initialize
DEVICE = "auto"
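# Resolve the "auto" placeholder to a concrete device at startup.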
if DEVICE == "auto":
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"[SYSTEM] | Using {DEVICE} type compute device.")
# Variables
HF_TOKEN = os.environ.get("HF_TOKEN")
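# Upper bound for random seeds: 2**53 - 1, the largest integer a JavaScript number can represent exactly.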
MAX_SEED = 9007199254740991
DEFAULT_INPUT = ""
DEFAULT_NEGATIVE_INPUT = "(bad, ugly, amputation, abstract, blur, blurry, deformed, distorted, disfigured, disconnected, mutation, mutated, low quality, lowres), unfinished, title, text, signature, watermark, (limbs, legs, feet, arms, hands), (porn, nude, naked, nsfw)"
DEFAULT_MODEL = "Default"
DEFAULT_HEIGHT = 1024
DEFAULT_WIDTH = 1024
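# Headers for authenticated calls to the Hugging Face Inference API.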
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {HF_TOKEN}"}
css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''
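# Only the "Default" pipeline is loaded eagerly; the other model slots (and the LoRA setup) are commented out, likely to save memory.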
repo_default = StableDiffusionXLPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
# repo_default.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
# repo_default.set_adapters(["base"], adapter_weights=[0.7])

repo_pixel = None  # StableDiffusionXLPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
# repo_pixel.load_lora_weights("artificialguybr/PixelArtRedmond", adapter_name="base")
# repo_pixel.load_lora_weights("nerijs/pixel-art-xl", adapter_name="base2")
# repo_pixel.set_adapters(["base", "base2"], adapter_weights=[1, 1])

repo_customs = {
    "Default": repo_default,
    "Realistic": None,  # StableDiffusionXLPipeline.from_pretrained("ehristoforu/Visionix-alpha", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
    "Anime": None,  # StableDiffusionXLPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
    "Pixel": None,  # repo_pixel,
    "Large": None,  # StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16, use_safetensors=True),
}
# Functions
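# Save a generated PIL image under a unique seed-prefixed filename and return its path.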
def save_image(img, seed):
    name = f"{seed}-{uuid.uuid4()}.png"
    img.save(name)
    return name
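# Interpret the seed textbox: a digit string is used as-is; blank, None, or anything else yields a random seed.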
def get_seed(seed):
    seed = (seed or "").strip()
    if seed.isdigit():
        return int(seed)
    else:
        return random.randint(0, MAX_SEED)
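# POST raw image bytes to an Inference API endpoint and decode the JSON classification result.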
def api_classification_request(url, filename, headers):
    with open(filename, "rb") as file:
        data = file.read()
    response = requests.request("POST", url, headers=headers or {}, data=data)
    return json.loads(response.content.decode("utf-8"))
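# Text-to-image entry point; @spaces.GPU requests a ZeroGPU slot for up to 60 seconds per call.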
@spaces.GPU(duration=60)
def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATIVE_INPUT, model=DEFAULT_MODEL, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH, steps=1, guidance=0, number=1, seed=None):
    repo = repo_customs[model or "Default"]
    filter_input = filter_input or ""
    negative_input = negative_input or DEFAULT_NEGATIVE_INPUT
    steps_set = steps
    guidance_set = guidance
    seed = get_seed(seed)
    print(input, filter_input, negative_input, model, height, width, steps, guidance, number, seed)
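    # Per-model step/guidance presets; they take effect below only when steps or guidance arrive falsy (0/None).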
    if model == "Realistic":
        steps_set = 25
        guidance_set = 7
    elif model == "Anime":
        steps_set = 25
        guidance_set = 7
    elif model == "Pixel":
        steps_set = 15
        guidance_set = 1.5
    elif model == "Large":
        steps_set = 30
        guidance_set = 5
    else:
        steps_set = 25
        guidance_set = 7

    if not steps:
        steps = steps_set
    if not guidance:
        guidance = guidance_set
    print(steps, guidance)
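    # Move the selected pipeline onto the compute device; note that only "Default" is non-None right now.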
    repo.to(DEVICE)
    parameters = {
        "prompt": input,
        "negative_prompt": filter_input + negative_input,
        "height": height,
        "width": width,
        "num_inference_steps": steps,
        "guidance_scale": guidance,
        "num_images_per_prompt": number,
        "generator": torch.Generator().manual_seed(seed),
        "output_type": "pil",
    }
    images = repo(**parameters).images
    image_paths = [save_image(img, seed) for img in images]
    print(image_paths)
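    # Screen the first generated image with an NSFW classifier hosted on the HF Inference API.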
    nsfw_prediction = api_classification_request("https://api-inference.huggingface.co/models/Falconsai/nsfw_image_detection", image_paths[0], headers)
    print(nsfw_prediction)
    return image_paths, {item["label"]: round(item["score"], 3) for item in nsfw_prediction}
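# No-op callback for the ☁️ button, presumably a keep-alive ping for the Space.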
def cloud():
    print("[CLOUD] | Space maintained.")
# Initialize
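# Build the Gradio UI: prompt/settings controls on top, then a gallery plus the NSFW classification label.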
with gr.Blocks(css=css) as main:
    with gr.Column():
        gr.Markdown("🪄 Generate high-quality images in all styles.")

    with gr.Column():
        input = gr.Textbox(lines=1, value=DEFAULT_INPUT, label="Input")
        filter_input = gr.Textbox(lines=1, value="", label="Input Filter")
        negative_input = gr.Textbox(lines=1, value=DEFAULT_NEGATIVE_INPUT, label="Input Negative")
        model = gr.Dropdown(choices=list(repo_customs.keys()), value="Default", label="Model")
        height = gr.Slider(minimum=8, maximum=2160, step=1, value=DEFAULT_HEIGHT, label="Height")
        width = gr.Slider(minimum=8, maximum=2160, step=1, value=DEFAULT_WIDTH, label="Width")
        steps = gr.Slider(minimum=1, maximum=100, step=1, value=25, label="Steps")
        guidance = gr.Slider(minimum=0, maximum=100, step=0.1, value=5, label="Guidance")
        number = gr.Slider(minimum=1, maximum=4, step=1, value=1, label="Number")
        seed = gr.Textbox(lines=1, value="", label="Seed (Blank for random)")
        submit = gr.Button("▶")
        maintain = gr.Button("☁️")

    with gr.Column():
        output = gr.Gallery(columns=1, label="Image")
        output_2 = gr.Label()

    submit.click(generate, inputs=[input, filter_input, negative_input, model, height, width, steps, guidance, number, seed], outputs=[output, output_2], queue=False)
    maintain.click(cloud, inputs=[], outputs=[], queue=False)

main.launch(show_api=True)