import base64
import concurrent.futures
import json
from io import BytesIO

import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image

from generate_propmts import generate_prompt
# Load the model once at import time and move it to the GPU when available
model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
model.to("cuda" if torch.cuda.is_available() else "cpu")
def generate_image(text, sentence_mapping, character_dict, selected_style):
    try:
        prompt, _ = generate_prompt(text, sentence_mapping, character_dict, selected_style)
        # SDXL-Turbo is tuned for single-step generation without guidance
        image = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
        # Encode the resulting PIL image as a base64 PNG string
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except Exception as e:
        print(f"Error generating image: {e}")
        return None
def inference(text_input):
    # A single text field is passed in; a JSON payload with the grouped
    # sentences, sentence mapping, character info, and style is assumed.
    data = json.loads(text_input)
    grouped_sentences = data["grouped_sentences"]
    print(f"Received grouped_sentences: {grouped_sentences}")  # Debugging statement
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Generate one image per paragraph in parallel
        futures = {
            paragraph_number: executor.submit(
                generate_image, " ".join(sentences), data.get("sentence_mapping", {}),
                data.get("character_dict", {}), data.get("selected_style", ""),
            )
            for paragraph_number, sentences in grouped_sentences.items()
        }
        # Collect the base64 results, keyed by paragraph number
        images = {number: future.result() for number, future in futures.items()}
    return json.dumps(images)
gradio_interface = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",  # Base64 image strings are returned as JSON text
)
if __name__ == "__main__":
    gradio_interface.launch()
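
# Hypothetical local usage sketch (the JSON payload shape below is an assumption
# based on how inference() reads its keys; adjust it to match generate_prompt):
#
# example_payload = json.dumps({
#     "grouped_sentences": {"1": ["A knight rides at dawn.", "Fog covers the field."]},
#     "sentence_mapping": {"1": ["A knight rides at dawn."]},
#     "character_dict": {"knight": "a weathered knight in silver armour"},
#     "selected_style": "watercolor",
# })
# print(inference(example_payload))  # prints a JSON dict of base64 PNG strings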