# Instant-Image / app.py
from __future__ import annotations

import os
import random
import uuid
from enum import Enum

import gradio as gr
import numpy as np
import torch
from diffusers import ConsistencyDecoderVAE, PixArtAlphaPipeline

# LCM-distilled PixArt-alpha checkpoint used by the demo.
MODEL_NAME = "PixArt-alpha/PixArt-LCM-XL-2-1024-MS"

# Environment-driven configuration, gathered up front for readability.
DESCRIPTION = """# Instant Image
### Super fast text-to-image generator.
### <span style='color: red;'>You may increase the steps from 4 to 8 if you are not satisfied with the results.</span>
### The first image takes longer to generate; subsequent images are faster.
"""
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4192"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
PORT = int(os.getenv("DEMO_PORT", "15432"))
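# Illustrative launch overriding the defaults above (the values here are
# examples only, not the defaults):
#   MAX_IMAGE_SIZE=2048 ENABLE_CPU_OFFLOAD=1 DEMO_PORT=7860 python app.py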
# Check CUDA availability early so CPU-only users see a warning in the UI.
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
# Cache examples only if CUDA is available
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
MAX_SEED = np.iinfo(np.int32).max
NUM_IMAGES_PER_PROMPT = 1
# Styles are managed as an Enum of (display name, prompt template, negative prompt).
class Style(Enum):
    NO_STYLE = ("(No style)", "{prompt}", "")
    CINEMATIC = ("Cinematic", "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy", "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured")
    REALISTIC = ("Realistic", "Photorealistic {prompt} . Ultra-realistic, professional, 4k, highly detailed", "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly, disfigured")
    ANIME = ("Anime", "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed", "photo, deformed, black and white, realism, disfigured, low contrast")
    DIGITAL_ART = ("Digital Art", "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed", "photo, photorealistic, realism, ugly")
    PIXEL_ART = ("Pixel art", "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics", "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic")
    FANTASY_ART = ("Fantasy art", "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy", "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white")
    THREE_D_MODEL = ("3D Model", "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting", "ugly, deformed, noisy, low poly, blurry, painting")

    # Enum reserves `name` for the member name (e.g. "CINEMATIC") and raises on
    # reassignment, so the human-readable label is stored as `display_name`.
    def __init__(self, display_name, prompt, negative_prompt):
        self.display_name = display_name
        self.prompt = prompt
        self.negative_prompt = negative_prompt

# Map display names to (prompt template, negative prompt) for lookup at generation time.
styles = {style.display_name: (style.prompt, style.negative_prompt) for style in Style}
STYLE_NAMES = list(styles.keys())
DEFAULT_STYLE_NAME = Style.NO_STYLE.display_name
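# Illustrative use of the lookup above (the same substitution generate()
# performs below; the prompt text is an example only):
#   template, negative = styles["Cinematic"]
#   template.replace("{prompt}", "a red fox in the snow")
#   -> "cinematic still a red fox in the snow . emotional, harmonious, ..."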
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load the pipeline only if CUDA is available
if torch.cuda.is_available():
    pipe = PixArtAlphaPipeline.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float16,
        use_safetensors=True,
    )

    # Optionally swap in the DALL-E 3 consistency decoder VAE.
    if os.getenv("CONSISTENCY_DECODER", "0") == "1":
        print("Using DALL-E 3 Consistency Decoder")
        pipe.vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)

    if ENABLE_CPU_OFFLOAD:
        pipe.enable_model_cpu_offload()
    else:
        pipe.to(device)
        print("Loaded on Device!")

    # Speed up the T5 text encoder.
    pipe.text_encoder.to_bettertransformer()

    if USE_TORCH_COMPILE:
        pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=True)
        print("Model Compiled!")
def save_image(img):
    # Save the image under a unique name in a temporary directory.
    os.makedirs("tmp", exist_ok=True)
    unique_name = os.path.join("tmp", f"{uuid.uuid4()}.png")
    img.save(unique_name)
    return unique_name
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    return random.randint(0, MAX_SEED) if randomize_seed else seed
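# For example, randomize_seed_fn(42, False) returns 42 unchanged, while
# randomize_seed_fn(42, True) returns a fresh seed in [0, MAX_SEED].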
# CUDA availability is checked inside the function, so a GPU decorator such as
# @spaces.GPU is not required here.
def generate(
    prompt: str,
    negative_prompt: str = "",
    style: str = DEFAULT_STYLE_NAME,
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    inference_steps: int = 8,
    randomize_seed: bool = False,
    use_resolution_binning: bool = True,
    progress=gr.Progress(track_tqdm=True),
):
    if not torch.cuda.is_available():
        raise gr.Error("This demo requires a GPU to run.")
    seed = int(randomize_seed_fn(seed, randomize_seed))
    generator = torch.Generator().manual_seed(seed)
    if not use_negative_prompt:
        negative_prompt = None
    # Apply the selected style: insert the user prompt into the style template
    # and fall back to the style's negative prompt when none was supplied.
    style_prompt, style_negative_prompt = styles.get(style, styles[DEFAULT_STYLE_NAME])
    prompt = style_prompt.replace("{prompt}", prompt)
    if not negative_prompt:
        negative_prompt = style_negative_prompt
    images = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=0,
        num_inference_steps=inference_steps,
        generator=generator,
        num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
        use_resolution_binning=use_resolution_binning,
        output_type="pil",
    ).images
    image_paths = [save_image(img) for img in images]
    return image_paths, seed
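# Illustrative direct call outside the UI (assumes a CUDA device; the prompt
# and style here are examples only):
#   paths, used_seed = generate("an astronaut sitting in a diner", style="Anime", randomize_seed=True)
#   paths -> ["tmp/<uuid>.png"]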
examples = [
    "A Monkey with a happy face in the Sahara desert.",
    "Eiffel Tower was Made up of ICE.",
    "Color photo of a corgi made of transparent glass, standing on the riverside in Yosemite National Park.",
    "A close-up photo of a woman. She wore a blue coat with a gray dress underneath and has blue eyes.",
    "A litter of golden retriever puppies playing in the snow. Their heads pop out of the snow, covered in.",
    "an astronaut sitting in a diner, eating fries, cinematic, analog film",
]
css = '''
.gradio-container {max-width: 560px !important}
h1 {text-align: center}
footer {
    visibility: hidden
}
'''
with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row(equal_height=False):
        with gr.Group():
            with gr.Row():
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                    container=False,
                )
                run_button = gr.Button("Run", scale=0)
            result = gr.Gallery(label="Result", columns=1, show_label=False)
        with gr.Accordion("Advanced options", open=False):
            with gr.Group():
                with gr.Row():
                    use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False, visible=True)
                negative_prompt = gr.Text(
                    label="Negative prompt",
                    max_lines=1,
                    placeholder="Enter a negative prompt",
                    visible=True,
                )
                style_selection = gr.Radio(
                    show_label=True,
                    container=True,
                    interactive=True,
                    choices=STYLE_NAMES,
                    value=DEFAULT_STYLE_NAME,
                    label="Image Style",
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                )
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                with gr.Row(visible=True):
                    width = gr.Slider(
                        label="Width",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=1024,
                    )
                    height = gr.Slider(
                        label="Height",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=1024,
                    )
                with gr.Row():
                    inference_steps = gr.Slider(
                        label="Steps",
                        minimum=4,
                        maximum=20,
                        step=1,
                        value=4,
                    )
    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result, seed],
        fn=generate,
        cache_examples=CACHE_EXAMPLES,
    )

    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )
    # generate() processes a single request at a time, so the event is not
    # batched (Gradio batching would pass lists into the scalar parameters).
    gr.on(
        triggers=[
            prompt.submit,
            negative_prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            prompt,
            negative_prompt,
            style_selection,
            use_negative_prompt,
            seed,
            width,
            height,
            inference_steps,
            randomize_seed,
        ],
        outputs=[result, seed],
        api_name="run",
    )
if __name__ == "__main__":
    demo.queue(max_size=200).launch(server_port=PORT)