# text2image_1 / app.py
# Source: Hugging Face Space by RanM (commit a7e869d, file size 1.65 kB).
# NOTE: the lines above were raw-page chrome from the HF file viewer and have
# been converted to comments so the module is valid Python.
import gradio as gr
import torch
from diffusers import DiffusionPipeline, AutoPipelineForText2Image
import base64
from io import BytesIO
def load_amused_model():
    """Load and return the text-to-image diffusion pipeline.

    NOTE(review): the name says "amused" but the model actually loaded is
    CompVis/stable-diffusion-v1-4 — the name is kept unchanged for caller
    compatibility.
    """
    # Safety checker disabled deliberately (reduces memory and load time);
    # generated outputs are therefore unfiltered.
    return DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        safety_checker=None,
        requires_safety_checker=False,
    )
# Generate image from prompt using the diffusion pipeline.
def generate_image(prompt):
    """Generate an image for *prompt* with a fixed seed.

    Returns:
        (image, None) on success, or (None, error_message) on failure —
        the error is returned as a string rather than raised.
    """
    try:
        # Fix: the original re-loaded the whole pipeline on EVERY call,
        # which re-initializes (and possibly re-downloads) the model per
        # request. Cache it on the function object after the first load.
        pipe = getattr(generate_image, "_pipe", None)
        if pipe is None:
            pipe = load_amused_model()
            generate_image._pipe = pipe
        # Fixed seed so repeated calls with the same prompt are reproducible.
        generator = torch.Generator().manual_seed(8)
        image = pipe(prompt, generator=generator).images[0]
        return image, None
    except Exception as e:
        # Broad catch is deliberate: callers expect (None, message), not a raise.
        return None, str(e)
def inference(prompt):
    """Gradio handler: turn a text prompt into a base64-encoded PNG string.

    On failure, returns a string prefixed with "Error: " instead of image data.
    """
    print(f"Received prompt: {prompt}")  # Debugging statement
    image, error = generate_image(prompt)
    if error:
        print(f"Error generating image: {error}")  # Debugging statement
        return "Error: " + error
    # Encode the PIL image as PNG bytes, then as a base64 text payload.
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
# Gradio UI: text prompt in, base64-encoded PNG string out
# (output is "text" because the handler returns a base64 string, not an image).
gradio_interface = gr.Interface(fn=inference, inputs="text", outputs="text")

if __name__ == "__main__":
    gradio_interface.launch()