# Hugging Face Space: text-to-image demo (stabilityai/sdxl-turbo via diffusers).
import gradio as gr
import torch
from diffusers import DiffusionPipeline, AutoPipelineForText2Image
import base64
from io import BytesIO
def text_to_image_model():
    """Return the shared sdxl-turbo text-to-image pipeline.

    The pipeline is downloaded/instantiated at most once and cached on the
    function object; the original re-created it on every request, which is
    extremely slow and memory-hungry.

    Returns:
        AutoPipelineForText2Image: the cached sdxl-turbo pipeline.
    """
    pipe = getattr(text_to_image_model, "_cached_pipe", None)
    if pipe is None:
        pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
        text_to_image_model._cached_pipe = pipe
    return pipe
def generate_image(prompt):
    """Generate a single image for *prompt*.

    Returns:
        tuple: ``(image, None)`` on success, ``(None, error_message)`` if
        anything raises during pipeline construction or inference.
    """
    try:
        result = text_to_image_model()(
            prompt=prompt, num_inference_steps=1, guidance_scale=0.0
        )
        return result.images[0], None
    except Exception as e:
        # Best-effort: surface the failure as a string rather than crashing.
        return None, str(e)
def inference(prompt):
    """Run text-to-image for *prompt* and return a base64-encoded PNG string.

    On failure, returns a human-readable ``"Error: ..."`` string instead of
    raising, so the Gradio text output always receives something printable.
    """
    print(f"Received prompt: {prompt}")  # Debugging statement
    image, error = generate_image(prompt)
    if error:
        print(f"Error generating image: {error}")  # Debugging statement
        return "Error: " + error
    # Serialize the PIL image to PNG bytes in memory, then base64-encode.
    buf = BytesIO()
    image.save(buf, format="PNG")
    return base64.b64encode(buf.getvalue()).decode("utf-8")
# Wire the model into a minimal Gradio app: text prompt in, base64-encoded
# PNG (or an "Error: ..." message) out as plain text.
# NOTE: the original file ended with a stray " |" scrape artifact after
# launch(), which was a syntax error; it has been removed.
gradio_interface = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",  # base64 string is returned as plain text
)

if __name__ == "__main__":
    gradio_interface.launch()