import gradio as gr
from diffusers import AutoPipelineForText2Image
from generate_propmts import generate_prompt
from PIL import Image
import asyncio
import threading
import traceback

# Load the model once outside of the function so every request reuses the same pipeline
model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
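# Optional: if the Space has a GPU, half precision makes SDXL-Turbo much faster.
# A minimal sketch, assuming torch is installed and an fp16 variant of the
# checkpoint is available (both are assumptions, not part of the original app):
#
#   import torch
#   if torch.cuda.is_available():
#       model = AutoPipelineForText2Image.from_pretrained(
#           "stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16"
#       ).to("cuda")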
# Wrapper that guards the scheduler's step() so a concurrent request cannot
# crash with an IndexError when the step counter runs past the schedule.
class SchedulerWrapper:
    def __init__(self, scheduler):
        self.scheduler = scheduler
        self._step = threading.local()

    def __getattr__(self, name):
        # Delegate every other attribute (set_timesteps, timesteps, config, ...)
        # to the wrapped scheduler so the pipeline keeps working.
        return getattr(self.scheduler, name)

    def step(self, *args, **kwargs):
        try:
            # threading.local() starts empty in each new thread, so default to 0
            self._step.step = getattr(self._step, "step", 0) + 1
            return self.scheduler.step(*args, **kwargs)
        except IndexError:
            # The scheduler ran past its timestep schedule: reset and retry once
            self._step.step = 0
            return self.scheduler.step(*args, **kwargs)

# Wrap the scheduler
model.scheduler = SchedulerWrapper(model.scheduler)
async def generate_image(prompt):
    try:
        # SDXL-Turbo is tuned for very few steps; 1-4 is typical, 5 is plenty
        num_inference_steps = 5

        # Run the blocking pipeline call in a worker thread so the event loop stays responsive
        output = await asyncio.to_thread(
            model,
            prompt=prompt,
            num_inference_steps=num_inference_steps,
            guidance_scale=0.0,  # SDXL-Turbo is trained without classifier-free guidance
            output_type="pil"    # Return PIL Image objects directly
        )

        # Check for output validity and return the first image
        if output.images:
            return output.images[0]
        else:
            raise Exception("No images returned by the model.")
    except Exception as e:
        print(f"Error generating image: {e}")
        traceback.print_exc()
        return None  # Return None on error so the UI can skip the failed image
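# Quick local smoke test for generate_image (a sketch; the prompt string and
# output filename are only examples, not part of the original app):
#
#   image = asyncio.run(generate_image("a watercolor fox in a misty forest"))
#   if image is not None:
#       image.save("preview.png")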
async def inference(sentence_mapping, character_dict, selected_style):
    print(f'sentence_mapping: {sentence_mapping}, character_dict: {character_dict}, selected_style: {selected_style}')
    prompts = []

    # Generate one prompt per paragraph
    for paragraph_number, sentences in sentence_mapping.items():
        combined_sentence = " ".join(sentences)
        prompt = generate_prompt(combined_sentence, sentence_mapping, character_dict, selected_style)
        prompts.append(prompt)
        print(f"Generated prompt for paragraph {paragraph_number}: {prompt}")

    # Use asyncio.gather to run generate_image for all prompts concurrently
    tasks = [generate_image(prompt) for prompt in prompts]
    images = await asyncio.gather(*tasks)

    # Filter out None values from failed generations
    images = [image for image in images if image is not None]
    return images
gradio_interface = gr.Interface(
    fn=inference,
    inputs=[
        gr.JSON(label="Sentence Mapping"),
        gr.JSON(label="Character Dict"),
        gr.Dropdown(["oil painting", "sketch", "watercolor"], label="Selected Style")
    ],
    outputs=gr.Gallery(label="Generated Images")
)

if __name__ == "__main__":
    gradio_interface.launch()
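# Example inputs for exercising inference() directly (a sketch; the exact
# structure that generate_prompt in generate_propmts.py expects is an assumption):
#
#   sentence_mapping = {"1": ["A knight rides through a misty forest."],
#                       "2": ["She reaches a ruined tower at dusk."]}
#   character_dict = {"knight": "a tall knight in weathered silver armour"}
#   images = asyncio.run(inference(sentence_mapping, character_dict, "watercolor"))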