# text2image_1 / app.py — Gradio Space: text-to-image generation with SDXL-Turbo
# Standard library
import threading
import traceback
from concurrent.futures import ThreadPoolExecutor, as_completed

# Third-party
import gradio as gr
from diffusers import AutoPipelineForText2Image
from PIL import Image

# Local
from generate_propmts import generate_prompt
# Load the SDXL-Turbo pipeline once at module import so every request thread
# reuses the same weights instead of reloading them per call.
model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
# Thread-local storage for per-worker step indices used by generate_image();
# requires the top-level `import threading`.
scheduler_step_storage = threading.local()
def generate_image(prompt):
    """Generate a single PIL image from *prompt* with the SDXL-Turbo pipeline.

    Args:
        prompt: Text prompt passed to the diffusion pipeline.

    Returns:
        The first generated ``PIL.Image.Image``, or ``None`` when generation
        fails or the pipeline returns no images (callers treat ``None`` as
        "skip this prompt").
    """
    # NOTE: the previous version kept a thread-local step counter here, but
    # it was never passed to the pipeline — dead bookkeeping, removed.
    try:
        # SDXL-Turbo is distilled for one-step, guidance-free sampling, so
        # num_inference_steps=1 and guidance_scale=0.0 are the intended settings.
        output = model(
            prompt=prompt,
            num_inference_steps=1,
            guidance_scale=0.0,
            output_type="pil",  # directly get PIL Image objects
        )
    except Exception as e:  # pipeline errors (OOM, bad prompt, ...) -> log and skip
        print(f"Error generating image: {e}")
        traceback.print_exc()
        return None
    # An empty result is not exceptional from the caller's point of view:
    # report it and return None so the UI can skip this prompt gracefully
    # (the old code raised here only to catch its own exception below).
    if output.images:
        return output.images[0]
    print("No images returned by the model.")
    return None
def inference(sentence_mapping, character_dict, selected_style):
    """Generate one image per paragraph described in *sentence_mapping*.

    Args:
        sentence_mapping: Mapping of paragraph number -> list of sentence
            strings for that paragraph.
        character_dict: Character descriptions forwarded to generate_prompt.
        selected_style: Art-style label forwarded to generate_prompt.

    Returns:
        List of PIL images in paragraph (submission) order; prompts whose
        generation failed are omitted.
    """
    print(f'sentence_mapping: {sentence_mapping}, character_dict: {character_dict}, selected_style: {selected_style}')
    prompts = []
    # Build one prompt per paragraph by joining its sentences.
    for paragraph_number, sentences in sentence_mapping.items():
        combined_sentence = " ".join(sentences)
        prompt = generate_prompt(combined_sentence, sentence_mapping, character_dict, selected_style)
        prompts.append(prompt)
        print(f"Generated prompt for paragraph {paragraph_number}: {prompt}")
    images = []
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(generate_image, prompt) for prompt in prompts]
        # Iterate futures in SUBMISSION order, not as_completed() order, so the
        # gallery images line up with their paragraphs; generation still runs
        # concurrently, we only wait in order.
        for future in futures:
            try:
                image = future.result()
                if image is not None:
                    images.append(image)
            except Exception as e:
                print(f"Error processing prompt: {e}")
                traceback.print_exc()
    return images
# Gradio UI wiring: two JSON inputs describing the story structure plus a
# style dropdown; inference() returns a list of PIL images for the gallery.
gradio_interface = gr.Interface(
    fn=inference,
    inputs=[
        gr.JSON(label="Sentence Mapping"),
        gr.JSON(label="Character Dict"),
        gr.Dropdown(["oil painting", "sketch", "watercolor"], label="Selected Style")
    ],
    outputs=gr.Gallery(label="Generated Images")
)
# Launch the app only when run as a script (not when imported by another module).
if __name__ == "__main__":
    gradio_interface.launch()