"""Gradio app that renders one SDXL-Turbo image per paragraph.

Each paragraph's sentences are joined into a single text-to-image prompt,
all prompts are rendered concurrently, and the resulting JPEG bytes are
returned in a dict keyed by paragraph number.
"""

import asyncio
from io import BytesIO

import gradio as gr
import torch  # noqa: F401 -- required at runtime by the diffusers pipeline
from diffusers import AutoPipelineForText2Image

# NOTE(review): the original file also did `from generate_propmts import
# generate_prompt` (module name misspelled), but that name was unconditionally
# shadowed by the local `generate_prompt` defined below, so the import was
# dead code and has been removed.

# Load the model once at import time so every request reuses the same pipeline.
model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")


async def generate_image(prompt):
    """Render one image for *prompt* and return it as JPEG bytes.

    Returns None on any failure (model error, no images, or JPEG encoding
    error). Errors are logged to stdout rather than raised so that one bad
    prompt does not abort the whole batch in process_prompt().
    """
    try:
        # The pipeline call is blocking (CPU/GPU bound); run it in a worker
        # thread so the asyncio event loop stays responsive.
        output = await asyncio.to_thread(
            model, prompt=prompt, num_inference_steps=1, guidance_scale=0.0
        )
    except Exception as e:
        print(f"Error generating image: {e}")
        return None

    print(f"Model output: {output}")

    # The pipeline is expected to return a list of PIL images in `.images`.
    if not isinstance(getattr(output, "images", None), list) or not output.images:
        print("Error generating image: No images returned by the model.")
        return None

    image = output.images[0]
    buffered = BytesIO()
    try:
        image.save(buffered, format="JPEG")
    except Exception as e:
        print(f"Error saving image: {e}")
        return None

    image_bytes = buffered.getvalue()
    print(f"Image bytes length: {len(image_bytes)}")
    return image_bytes


async def process_prompt(sentence_mapping, character_dict, selected_style):
    """Generate one image per paragraph, concurrently.

    Args:
        sentence_mapping: dict mapping paragraph number -> list of sentences.
        character_dict: dict of character descriptions (values may be strings
            or lists of strings) folded into every prompt.
        selected_style: art-style name interpolated into every prompt.

    Returns:
        dict mapping paragraph number -> JPEG bytes, or None for paragraphs
        whose generation failed.
    """
    print(f'sentence_mapping: {sentence_mapping}, character_dict: {character_dict}, selected_style: {selected_style}')

    prompts = []
    for paragraph_number, sentences in sentence_mapping.items():
        combined_sentence = " ".join(sentences)
        prompt = generate_prompt(combined_sentence, character_dict, selected_style)
        prompts.append((paragraph_number, prompt))
        print(f"Generated prompt for paragraph {paragraph_number}: {prompt}")

    # Run all renders concurrently. gather() preserves input order and always
    # yields exactly one result per task, so the paragraph->result pairing via
    # zip below is safe (the original's index-bounds check was dead code).
    results = await asyncio.gather(*(generate_image(p) for _, p in prompts))

    return {
        paragraph_number: image
        for (paragraph_number, _), image in zip(prompts, results)
    }


def generate_prompt(combined_sentence, character_dict, selected_style):
    """Build the text-to-image prompt for one paragraph.

    Character entries that are lists are flattened with spaces; all entries
    are concatenated ahead of the paragraph text.
    """
    characters = " ".join(
        " ".join(character) if isinstance(character, list) else character
        for character in character_dict.values()
    )
    return f"Make an illustration in {selected_style} style from: {characters}. {combined_sentence}"


# Gradio interface with a high concurrency limit so many renders can queue.
# NOTE(review): process_prompt returns raw JPEG bytes inside the dict, which
# a "json" output component cannot serialize as-is -- callers likely need
# base64 encoding here; confirm against the consuming client.
gradio_interface = gr.Interface(
    fn=process_prompt,
    inputs=[
        gr.JSON(label="Sentence Mapping"),
        gr.JSON(label="Character Dict"),
        gr.Dropdown(["oil painting", "sketch", "watercolor"], label="Selected Style"),
    ],
    outputs="json",
    concurrency_limit=20,  # allow many simultaneous generation requests
).queue(default_concurrency_limit=20)

if __name__ == "__main__":
    gradio_interface.launch()  # share=True not needed for local testing