# text2image_1/app.py — HuggingFace Space app (commit 7aae5d0)
import io

import gradio as gr
from diffusers import AutoPipelineForText2Image

from generate_prompts import generate_prompt
# Load the pipeline once at import time so every request reuses the same
# in-memory model instead of re-initialising (and re-downloading) per call.
# NOTE(review): no .to("cuda")/dtype override here, so inference runs on the
# default device/precision — confirm against the deployment target.
model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
def generate_image(prompt, prompt_name):
    """Run the text-to-image pipeline for one prompt.

    Args:
        prompt: Text prompt passed to the SDXL-Turbo pipeline.
        prompt_name: Human-readable label used only in log messages.

    Returns:
        PNG-encoded image bytes on success, or None if generation fails.
    """
    try:
        print(f"Generating image for {prompt_name}")
        # SDXL-Turbo is distilled for single-step, guidance-free sampling.
        output = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
        image = output.images[0]
        # BUG FIX: the original used image.tobytes(), which dumps raw pixel
        # data with no header — mode and dimensions are lost, so the bytes
        # cannot be decoded back into an image downstream. Encode as PNG
        # instead; the return type (bytes) is unchanged for callers.
        buffer = io.BytesIO()
        image.save(buffer, format="PNG")
        img_bytes = buffer.getvalue()
        print(f"Image bytes length for {prompt_name}: {len(img_bytes)}")
        return img_bytes
    except Exception as e:
        # Best-effort endpoint: log and signal failure with None rather
        # than crashing the whole batch in gradio_interface.
        print(f"Error generating image for {prompt_name}: {e}")
        return None
def gradio_interface(sentence_mapping, character_dict, selected_style):
    """Build prompts from the UI inputs and generate one image per prompt.

    Args:
        sentence_mapping: Raw text from the "Sentence Mapping" textbox.
        character_dict: Raw text from the "Character Dictionary" textbox.
        selected_style: Raw text from the "Selected Style" textbox.

    Returns:
        A list of gr.Image updates, one per prompt; entries whose
        generation failed carry value=None.
    """
    prompts = generate_prompt(sentence_mapping, character_dict, selected_style)
    image_bytes_list = [
        generate_image(prompt, f"Prompt {i}") for i, prompt in enumerate(prompts)
    ]
    # BUG FIX: the original branched on truthiness (`if img_bytes`), which
    # silently turned zero-length bytes into None. Passing the value through
    # unconditionally is equivalent for every non-empty result (None maps to
    # value=None either way) and preserves empty-but-valid payloads.
    return [gr.Image.update(value=img_bytes) for img_bytes in image_bytes_list]
# Gradio Interface
# ---- Gradio UI ------------------------------------------------------------
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Free-text inputs feeding generate_prompt() via gradio_interface.
            sentence_mapping_input = gr.Textbox(label="Sentence Mapping")
            character_dict_input = gr.Textbox(label="Character Dictionary")
            selected_style_input = gr.Textbox(label="Selected Style")
            submit_btn = gr.Button(value='Submit')

    # NOTE(review): no Image components are ever appended to this list, so
    # the values gradio_interface returns are dropped by Gradio — confirm
    # whether dynamic output components were meant to be registered here.
    prompt_responses = []  # Empty list for dynamic addition of Image components

    submit_btn.click(
        fn=gradio_interface,
        inputs=[sentence_mapping_input, character_dict_input, selected_style_input],
        outputs=prompt_responses,
    )

if __name__ == "__main__":
    demo.launch()