import streamlit as st
import torch
import numpy as np
from PIL import Image
import random
import uuid
from diffusers import PixArtAlphaPipeline
# Check for CUDA availability
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# # Load the PixArtAlphaPipeline
# if torch.cuda.is_available():
#     pipe = PixArtAlphaPipeline.from_pretrained(
#         "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
#         torch_dtype=torch.float16,
#         use_safetensors=True,
#     )
#     pipe.to(device)
#     st.write("Model loaded successfully!")
# else:
#     st.error("This demo requires GPU support, which is not available on this system.")
# Load the PixArtAlphaPipeline (float16 on GPU; fall back to float32 on CPU)
pipe = PixArtAlphaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    use_safetensors=True,
)
pipe.to(device)
st.write("Model loaded successfully!")
# Constants
MAX_SEED = np.iinfo(np.int32).max
# Function to save image and return the path
def save_image(img):
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name
# Main function for image generation
def generate_image(prompt, style, use_negative_prompt, negative_prompt, seed, width, height, inference_steps):
    generator = torch.Generator().manual_seed(seed)
    # Apply the selected style
    if style == "(No style)":
        prompt_text = prompt
    else:
        prompt_text, _ = apply_style(style, prompt, negative_prompt)
    # Generate the image (guidance_scale=0 because the LCM variant skips classifier-free
    # guidance, so the negative prompt has little effect here)
    images = pipe(
        prompt=prompt_text,
        negative_prompt=negative_prompt if use_negative_prompt else None,
        width=width,
        height=height,
        guidance_scale=0,
        num_inference_steps=inference_steps,
        generator=generator,
        num_images_per_prompt=1,
        use_resolution_binning=True,
        output_type="pil",
    ).images
    # Save the image and display it
    if images:
        img_path = save_image(images[0])
        img = Image.open(img_path)
        st.image(img, caption="Generated Image", use_column_width=True)
        st.success("Image generated successfully!")
    else:
        st.error("Failed to generate image. Please try again.")
# Helper function to apply the selected style
def apply_style(style_name, positive, negative):
    # Styles dictionary (similar to the Gradio demo); each entry is (styled prompt, style negative prompt)
    styles = {
        "(No style)": (positive, ""),
        "Cinematic": ("cinematic still " + positive, "anime, cartoon, ..."),
        "Realistic": ("Photorealistic " + positive, "drawing, painting, ..."),
        # Add other styles here...
    }
    return styles.get(style_name, styles["(No style)"])
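# Example: apply_style("Cinematic", "a red car", "") returns
# ("cinematic still a red car", "anime, cartoon, ..."); an unknown style name
# falls back to the "(No style)" entry.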
# Streamlit UI
st.title("Instant Image Generator")
prompt = st.text_input("Prompt", placeholder="Enter your prompt")
style_names = ["(No style)", "Cinematic", "Realistic"] # Add other styles here...
style = st.selectbox("Image Style", style_names)
use_negative_prompt = st.checkbox("Use negative prompt")
negative_prompt = st.text_input("Negative prompt", "")
seed = st.slider("Seed", 0, MAX_SEED, 0)
width = st.slider("Width", 256, 4096, 1024, step=32)
height = st.slider("Height", 256, 4096, 1024, step=32)
inference_steps = st.slider("Steps", 4, 20, 4)
if st.button("Generate Image"):
    generate_image(prompt, style, use_negative_prompt, negative_prompt, seed, width, height, inference_steps)
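# To run this app locally (assuming the file is saved as app.py):
#     streamlit run app.py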