import gradio as gr
import requests
import io
import random
import os
from PIL import Image

list_models = [
    "SDXL-1.0",
    "SD-1.5",
    "OpenJourney-V4",
    "Anything-V4",
    "Disney-Pixar-Cartoon",
    "Pixel-Art-XL",
    "Dalle-3-XL",
    "Midjourney-V4-XL",
]
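# Optional refactor sketch (an assumption, not part of the original code): the if/elif
# endpoint selection inside generate_txt2img below could be collapsed into a lookup
# table mapping each display name in list_models to its Inference API endpoint, e.g.:
#
# MODEL_ENDPOINTS = {
#     "SDXL-1.0": "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
#     "SD-1.5": "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5",
#     # ... one entry per model in list_models
# }
# API_URL = MODEL_ENDPOINTS[current_model]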
def generate_txt2img(current_model, prompt, is_negative="", image_style="None style", steps=50, cfg_scale=7,
                     seed=None):
    # is_negative carries the negative-prompt text (it is concatenated with style
    # suffixes below), so it defaults to an empty string rather than a boolean.
    # Map the selected model name to its Hugging Face Inference API endpoint.
    if current_model == "SD-1.5":
        API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
    elif current_model == "SDXL-1.0":
        API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
    elif current_model == "OpenJourney-V4":
        API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney"
    elif current_model == "Anything-V4":
        API_URL = "https://api-inference.huggingface.co/models/xyn-ai/anything-v4.0"
    elif current_model == "Disney-Pixar-Cartoon":
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/disney-pixar-cartoon"
    elif current_model == "Pixel-Art-XL":
        API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
    elif current_model == "Dalle-3-XL":
        API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
    elif current_model == "Midjourney-V4-XL":
        API_URL = "https://api-inference.huggingface.co/models/openskyml/midjourney-v4-xl"

    # Read-only Hugging Face token, expected in the HF_READ_TOKEN environment variable.
    API_TOKEN = os.environ.get("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    # Each style preset appends descriptor text to the prompt (and, for some styles,
    # to the negative prompt) before the request is sent.
    if image_style == "None style":
        payload = {
            "inputs": prompt + ", 8k",
            "is_negative": is_negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
    elif image_style == "Cinematic":
        payload = {
            "inputs": prompt + ", realistic, detailed, textured, skin, hair, eyes, by Alex Huguet, Mike Hill, Ian Spriggs, JaeCheol Park, Marek Denko",
            "is_negative": is_negative + ", abstract, cartoon, stylized",
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
    elif image_style == "Digital Art":
        payload = {
            "inputs": prompt + ", faded, vintage, nostalgic, by Jose Villa, Elizabeth Messina, Ryan Brenizer, Jonas Peterson, Jasmine Star",
            "is_negative": is_negative + ", sharp, modern, bright",
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }
    elif image_style == "Portrait":
        payload = {
            "inputs": prompt + ", soft light, sharp, exposure blend, medium shot, bokeh, (hdr:1.4), high contrast, (cinematic, teal and orange:0.85), (muted colors, dim colors, soothing tones:1.3), low saturation, (hyperdetailed:1.2), (noir:0.4), (natural skin texture, hyperrealism, soft light, sharp:1.2)",
            "is_negative": is_negative,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "seed": seed if seed is not None else random.randint(-1, 2147483647)
        }

    # POST the payload to the Inference API and decode the returned image bytes.
    image_bytes = requests.post(API_URL, headers=headers, json=payload).content
    image = Image.open(io.BytesIO(image_bytes))
    return image
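# Optional robustness sketch (not part of the original Space): the Inference API can
# answer with HTTP 503 while a cold model is still loading. A hypothetical helper like
# _post_with_retry below waits and retries a few times before giving up; the helper
# name, retry count, and timeout values are assumptions for illustration only.
import time

def _post_with_retry(url, headers, payload, retries=3, wait=10):
    response = None
    for _ in range(retries):
        response = requests.post(url, headers=headers, json=payload, timeout=120)
        if response.status_code == 503:
            # Model is still loading on the API side; wait and try again.
            time.sleep(wait)
            continue
        response.raise_for_status()
        return response.content
    # All attempts returned 503 (or another error): surface it to the caller.
    response.raise_for_status()
    return response.content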
css = """
/* General Container Styles */
.gradio-container {
    font-family: 'IBM Plex Sans', sans-serif;
    max-width: 800px !important;
    margin: auto;
    padding-top: 2rem;
}

/* Button Styles */
.gradio-button-primary {
    color: #fff;
    background-color: #2563eb;
    border-color: #1d4ed8;
}

.gradio-button-primary:hover {
    background-color: #1d4ed8;
    border-color: #1d4ed8;
}

/* Input Styles */
.gradio-textbox {
    border-color: #a0aec0;
    padding: 9px 12px;
}

.gradio-textbox:focus {
    border-color: #2563eb;
    box-shadow: 0 0 0 2px rgba(37,99,235,.25);
}

/* Output Image Styles */
.gradio-output img {
    max-width: 100%;
    height: auto;
    border-radius: 6px;
    box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
}
"""
favicon = '<img src="" width="48px" style="display: inline">'
title = f"""<h1><center>{favicon} AI Diffusion</center></h1>"""

# Build the UI with Gradio Blocks; components are created inside the Blocks context.
with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    with gr.Column():
        current_model = gr.Dropdown(label="Current Model", choices=list_models, value=list_models[1])
        text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt", lines=1)
        negative_prompt = gr.Textbox(label="Negative Prompt", value="text, blurry, fuzziness", lines=1)
        image_style = gr.Dropdown(label="Style", choices=["None style", "Cinematic", "Digital Art", "Portrait"], value="None style")
        generate_button = gr.Button("Generate", variant="primary")
        image_output = gr.Image(label="Output Image")

    # Wire the button to the generation function defined above.
    generate_button.click(
        generate_txt2img,
        inputs=[current_model, text_prompt, negative_prompt, image_style],
        outputs=image_output,
    )

demo.launch(show_api=False)
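# Usage notes (assumptions, not part of the original file):
#   export HF_READ_TOKEN=<your Hugging Face read token>   # read by generate_txt2img
#   python app.py                                         # then open the local URL Gradio prints
# Passing share=True to demo.launch() would additionally create a temporary public link;
# on Hugging Face Spaces the app is served automatically without it.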