Spaces:
Runtime error
import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image
from io import BytesIO
from generate_propmts import generate_prompt
from concurrent.futures import ThreadPoolExecutor, as_completed
import json

# Load the model once outside of the function
model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
def generate_image(prompt):
    try:
        # Run a single inference step (SDXL-Turbo is built for 1 step with guidance disabled)
        output = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
        print(f"Model output: {output}")
        # Check if the model returned images
        if output.images:
            image = output.images[0]
            buffered = BytesIO()
            image.save(buffered, format="JPEG")
            image_bytes = buffered.getvalue()
            return image_bytes
        else:
            raise Exception("No images returned by the model.")
    except Exception as e:
        print(f"Error generating image: {e}")
        return None
def inference(sentence_mapping, character_dict, selected_style):
    images = {}
    print(f'sentence_mapping: {sentence_mapping}, character_dict: {character_dict}, selected_style: {selected_style}')
    prompts = []
    # Generate prompts for each paragraph
    for paragraph_number, sentences in sentence_mapping.items():
        combined_sentence = " ".join(sentences)
        prompt = generate_prompt(combined_sentence, sentence_mapping, character_dict, selected_style)
        prompts.append((paragraph_number, prompt))
        print(f"Generated prompt for paragraph {paragraph_number}: {prompt}")
    # Generate images concurrently, keyed by paragraph number
    with ThreadPoolExecutor() as executor:
        future_to_paragraph = {executor.submit(generate_image, prompt): paragraph_number for paragraph_number, prompt in prompts}
        for future in as_completed(future_to_paragraph):
            paragraph_number = future_to_paragraph[future]
            try:
                image = future.result()
                if image:
                    images[paragraph_number] = image
            except Exception as e:
                print(f"Error processing paragraph {paragraph_number}: {e}")
    return images
gradio_interface = gr.Interface(
    fn=inference,
    inputs=[
        gr.JSON(label="Sentence Mapping"),
        gr.JSON(label="Character Dict"),
        gr.Dropdown(["oil painting", "sketch", "watercolor"], label="Selected Style")
    ],
    outputs="json"
)

if __name__ == "__main__":
    gradio_interface.launch()
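
For reference, here is a minimal sketch of the inputs the interface above accepts. The exact shapes of sentence_mapping and character_dict are assumptions inferred from how inference() uses them, not a documented format:

# Example call (assumed input shapes, for illustration only)
example_sentence_mapping = {
    "1": ["A knight rides through a misty forest.", "He carries a glowing lantern."],
    "2": ["At dawn he reaches a ruined tower."]
}
example_character_dict = {"knight": "a tall knight in silver armor"}
result = inference(example_sentence_mapping, example_character_dict, "watercolor")

Note that inference() returns raw JPEG bytes for each paragraph, and the JSON output component cannot serialize bytes directly; one option (an assumption on my part, not part of the original code) is to base64-encode each image before returning it, e.g. base64.b64encode(image_bytes).decode("utf-8") after importing base64.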