# text2image_1 / app.py
import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image
import base64
from io import BytesIO

# Load the model once outside of the function
model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
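
# Optional: SDXL-Turbo is very slow on CPU, so when a CUDA GPU is available move the
# pipeline there in half precision (a sketch; the app still works on CPU without it).
if torch.cuda.is_available():
    model = model.to("cuda", torch.float16)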


def generate_image(prompt):
    try:
        # SDXL-Turbo is designed for single-step sampling without classifier-free guidance.
        image = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
        return image, None
    except Exception as e:
        return None, str(e)


def inference(prompt):
    print(f"Received prompt: {prompt}")  # Debugging statement
    image, error = generate_image(prompt)
    if error:
        print(f"Error generating image: {error}")  # Debugging statement
        return "Error: " + error
    # Encode the generated PIL image as a base64 PNG string so it can be returned as text.
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str
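

# Example helper (not called by the app): shows how a client that receives the
# base64 string from `inference` could turn it back into a PIL image. The name
# `decode_base64_image` is illustrative only.
def decode_base64_image(img_str):
    from PIL import Image
    return Image.open(BytesIO(base64.b64decode(img_str)))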


gradio_interface = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",  # Output is text so the base64 string can be returned
)

if __name__ == "__main__":
    gradio_interface.launch()