# aidiffusion / app.py
import gradio as gr
import requests
import io
import random
import os
from PIL import Image
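
# Display names offered in the model dropdown below.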
list_models = [
"SDXL-1.0",
"SD-1.5",
"OpenJourney-V4",
"Anything-V4",
"Disney-Pixar-Cartoon",
"Pixel-Art-XL",
"Dalle-3-XL",
"Midjourney-V4-XL",
]

# Text-to-image handler wired to the "Generate Image" button below.
def generate_txt2img(current_model, prompt, is_negative, image_style, steps, cfg_scale, seed):
    # The original file leaves this body as a placeholder ("... Your function implementation").
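    # What follows is a minimal, hedged sketch of one possible implementation,
    # not the Space's original code: it assumes the Hugging Face Inference API
    # as the backend, and the model-to-repo mapping, the HF_TOKEN environment
    # variable, and the request parameters are illustrative assumptions that
    # may need adjusting for the real deployment.
    model_repos = {
        "SDXL-1.0": "stabilityai/stable-diffusion-xl-base-1.0",  # example mapping only
        "SD-1.5": "stable-diffusion-v1-5/stable-diffusion-v1-5",  # example mapping only
    }
    repo_id = model_repos.get(current_model, "stabilityai/stable-diffusion-xl-base-1.0")
    if image_style and image_style != "None":
        # Fold the selected style into the prompt text.
        prompt = f"{prompt}, {image_style.lower()} style"
    payload = {
        "inputs": prompt,
        "parameters": {
            "negative_prompt": is_negative,
            "num_inference_steps": int(steps),
            "guidance_scale": cfg_scale,
            "seed": int(seed),
        },
    }
    headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', '')}"}
    response = requests.post(
        f"https://api-inference.huggingface.co/models/{repo_id}",
        headers=headers,
        json=payload,
        timeout=120,
    )
    response.raise_for_status()
    # Text-to-image endpoints return raw image bytes; decode them into a PIL image.
    return Image.open(io.BytesIO(response.content))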

# Enhanced CSS for better styling
css = """
body {
    font-family: 'Helvetica Neue', sans-serif;
    background-color: #f7f8fa;
}
.gradio-container {
    max-width: 800px;
    margin: auto;
    padding-top: 1rem;
    padding-bottom: 1rem;
    border-radius: 10px;
    background-color: white;
    box-shadow: 0 0 20px rgba(0,0,0,0.1);
}
.gradio-row {
    margin-bottom: 1rem;
}
.gradio-button, .gradio-dropdown, .gradio-textbox {
    border-radius: 5px;
}
.gradio-button {
    background-color: #4CAF50; /* Green */
    color: white;
}
.gradio-label {
    display: block;
    margin-bottom: 5px;
    font-weight: bold;
}
"""

# Gradio interface setup
with gr.Blocks(css=css) as demo:
    gr.Markdown("# AI Image Generation")
    with gr.Row():
        current_model = gr.Dropdown(label="Select Model", choices=list_models, value=list_models[0])
    with gr.Row():
        text_prompt = gr.Textbox(label="Enter Prompt", placeholder="Describe the image you want to generate", lines=2)
        text_button = gr.Button("Generate Image")
    with gr.Row():
        image_output = gr.Image(type="pil", label="Generated Image")
    with gr.Accordion("Advanced settings"):
        with gr.Column():
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter elements to avoid in the image", lines=2)
            image_style = gr.Dropdown(label="Select Style", choices=["None", "Cinematic", "Digital Art", "Portrait"], value="None")
            steps = gr.Slider(minimum=1, maximum=100, value=50, label="Steps")
            cfg_scale = gr.Slider(minimum=1, maximum=20, value=7, label="CFG Scale")
            seed = gr.Number(label="Seed", value=random.randint(0, 2**32 - 1))

    # Wire the Generate Image button to the text-to-image handler.
    text_button.click(
        fn=generate_txt2img,
        inputs=[current_model, text_prompt, negative_prompt, image_style, steps, cfg_scale, seed],
        outputs=image_output,
    )
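
# Launch the Gradio server; run locally with `python app.py` (by default the UI is served at http://127.0.0.1:7860).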
demo.launch()