import base64
from io import BytesIO

from diffusers import AutoPipelineForText2Image
import gradio as gr
print("Loading the Stable Diffusion model...")
try:
model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
print("Model loaded successfully.")
except Exception as e:
print(f"Error loading model: {e}")
model = None

def generate_image(prompt, prompt_name):
    """Generate a single image for `prompt` and return it as a base64-encoded PNG string."""
    try:
        if model is None:
            raise ValueError("Model not loaded properly.")
        print(f"Generating image for {prompt_name} with prompt: {prompt}")
        # SDXL-Turbo is a distilled model intended for very few denoising steps
        # with guidance disabled.
        output = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
        print(f"Model output for {prompt_name}: {output}")
        if output is None:
            raise ValueError(f"Model returned None for {prompt_name}")
        if hasattr(output, "images") and output.images:
            print(f"Image generated for {prompt_name}")
            image = output.images[0]
            buffered = BytesIO()
            image.save(buffered, format="PNG")
            # Base64-encode the PNG bytes so the result is JSON-serializable
            # (raw bytes cannot be returned through the gr.JSON output).
            return base64.b64encode(buffered.getvalue()).decode("utf-8")
        raise ValueError(f"No images found in model output for {prompt_name}")
    except Exception as e:
        print(f"An error occurred while generating image for {prompt_name}: {e}")
        return None
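
# A minimal consumer-side sketch: decode_generated_image is a hypothetical helper
# (not used by the app itself) showing how the base64 string returned by
# generate_image can be turned back into a PIL image.
def decode_generated_image(image_b64):
    """Decode a base64-encoded PNG string into a PIL.Image.Image."""
    from PIL import Image  # local import; only needed by this helper
    return Image.open(BytesIO(base64.b64decode(image_b64)))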

def process_prompt(sentence_mapping, character_dict, selected_style):
    """Build one prompt per paragraph and generate an image for each.

    Note: character_dict is accepted for interface compatibility but is not
    currently used when constructing the prompts.
    """
    print("Processing prompt...")
    print(f"Sentence Mapping: {sentence_mapping}")
    print(f"Character Dict: {character_dict}")
    print(f"Selected Style: {selected_style}")
    prompt_results = {}
    for paragraph_number, sentences in sentence_mapping.items():
        combined_sentence = " ".join(sentences)
        prompt = f"Make an illustration in {selected_style} style from: {combined_sentence}"
        image_b64 = generate_image(prompt, f"Prompt {paragraph_number}")
        prompt_results[paragraph_number] = image_b64
    return prompt_results
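
# A small usage sketch with hypothetical inputs; it is left commented out because
# running it triggers model inference:
#
#   sample_mapping = {"1": ["A fox runs through a snowy forest."]}
#   sample_characters = {"fox": "small, red, curious"}
#   results = process_prompt(sample_mapping, sample_characters, "watercolor")
#   # results maps each paragraph number to a base64 PNG string, or None on failure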

# Note: the queue() keyword for limiting concurrency differs across Gradio
# versions (concurrency_count in 3.x, default_concurrency_limit in 4.x).
gradio_interface = gr.Interface(
    fn=process_prompt,
    inputs=[
        gr.JSON(label="Sentence Mapping"),
        gr.JSON(label="Character Dict"),
        gr.Dropdown(["oil painting", "sketch", "watercolor"], label="Selected Style"),
    ],
    outputs="json",
).queue(concurrency_limit=10)

if __name__ == "__main__":
    print("Launching Gradio interface...")
    gradio_interface.launch()