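"""Gradio Space: text-to-image with SDXL and the Mann-E Turbo LoRA.

The interface takes a text prompt and returns the generated image as a
base64-encoded PNG string.
"""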
import gradio as gr
from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
import torch
import base64
from io import BytesIO

def load_pipeline():
    # Alternative checkpoints tried previously:
    # DiffusionPipeline.from_pretrained("Bakanayatsu/ponyDiffusion-V6-XL-Turbo-DPO")
    # AutoPipelineForText2Image.from_pretrained("stabilityai/sd-turbo")
    # AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
    return DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float32
    ).to("cpu")
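
# A hedged sketch, not part of the running app: if a CUDA GPU is available,
# half precision is far faster than CPU float32. Assumes the fp16 variant of
# this SDXL checkpoint on the Hub:
# pipe = DiffusionPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-xl-base-1.0",
#     torch_dtype=torch.float16,
#     variant="fp16",
# ).to("cuda")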

# Generate an image from the prompt using SDXL plus the Mann-E Turbo LoRA
def generate_image(prompt):
    try:
        # NOTE: reloading the pipeline on every request is slow; loading it
        # once at module level would be preferable on persistent hardware.
        pipe = load_pipeline()
        pipe.load_lora_weights(
            "mann-e/Mann-E_Turbo",
            weight_name="manne_turbo.safetensors",
        )
        # This is equivalent to DPM++ SDE Karras, as noted in
        # https://huggingface.co/docs/diffusers/main/en/api/schedulers/overview
        pipe.scheduler = DPMSolverSinglestepScheduler.from_config(
            pipe.scheduler.config, use_karras_sigmas=True
        )
        # For reproducible output, pass a seeded generator instead:
        # generator = torch.Generator().manual_seed(8)
        # image = pipe(prompt, generator=generator).images[0]
        image = pipe(
            prompt=prompt,  # was hardcoded to a test prompt; now uses the user's input
            num_inference_steps=8,
            guidance_scale=4,
            width=768,
            height=768,
            clip_skip=1,
        ).images[0]
        return image, None
    except Exception as e:
        return None, str(e)

def inference(prompt):
    print(f"Received prompt: {prompt}")  # Debugging statement
    image, error = generate_image(prompt)
    if error:
        print(f"Error generating image: {error}")  # Debugging statement
        return "Error: " + error
    # Encode the PIL image as a base64 PNG string for the text output
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str

gradio_interface = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",  # The image is returned as a base64-encoded PNG string
)

if __name__ == "__main__":
    gradio_interface.launch()
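
# Usage sketch (assumptions: the gradio_client package is installed and the app
# is reachable at the default local URL; "/predict" is the gr.Interface default
# endpoint). Decodes the base64 PNG returned by `inference` back into an image:
#
# from gradio_client import Client
# from io import BytesIO
# import base64
# from PIL import Image
#
# client = Client("http://127.0.0.1:7860")
# img_b64 = client.predict("a cat in a bustling middle eastern city", api_name="/predict")
# Image.open(BytesIO(base64.b64decode(img_b64))).save("out.png")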