Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,14 +1,15 @@
 import os
 import asyncio
+import concurrent.futures
 from io import BytesIO
 from diffusers import AutoPipelineForText2Image
 import gradio as gr
 from generate_prompts import generate_prompt
 
-# Initialize model
+# Initialize model globally
 model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
 
-async def generate_image(prompt, prompt_name):
+def generate_image(prompt, prompt_name):
     """
     Generates an image based on the provided prompt.
     Parameters:
@@ -19,7 +20,7 @@ async def generate_image(prompt, prompt_name):
     """
     try:
         print(f"Generating image for {prompt_name}")
-        output = model(prompt=prompt, num_inference_steps=
+        output = model(prompt=prompt, num_inference_steps=50, guidance_scale=7.5)
         if isinstance(output.images, list) and len(output.images) > 0:
             image = output.images[0]
             buffered = BytesIO()
@@ -34,7 +35,7 @@ async def generate_image(prompt, prompt_name):
 
 async def queue_api_calls(sentence_mapping, character_dict, selected_style):
     """
-    Generates images for all provided prompts in parallel using
+    Generates images for all provided prompts in parallel using ProcessPoolExecutor.
     Parameters:
     - sentence_mapping (dict): Mapping between paragraph numbers and sentences.
     - character_dict (dict): Dictionary mapping characters to their descriptions.
@@ -48,8 +49,13 @@ async def queue_api_calls(sentence_mapping, character_dict, selected_style):
         prompt = generate_prompt(combined_sentence, sentence_mapping, character_dict, selected_style)
         prompts.append((paragraph_number, prompt))
 
-
-
+    loop = asyncio.get_running_loop()
+    with concurrent.futures.ProcessPoolExecutor() as pool:
+        tasks = [
+            loop.run_in_executor(pool, generate_image, prompt, f"Prompt {paragraph_number}")
+            for paragraph_number, prompt in prompts
+        ]
+        responses = await asyncio.gather(*tasks)
 
     images = {paragraph_number: response for (paragraph_number, _), response in zip(prompts, responses)}
     return images
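For context, a minimal, self-contained sketch of the pattern the updated queue_api_calls adopts: blocking work dispatched from the asyncio event loop to a concurrent.futures.ProcessPoolExecutor and collected with asyncio.gather. The render function and the example prompts below are stand-ins for illustration only; they are not part of this Space's code.

# Minimal sketch of the asyncio + ProcessPoolExecutor pattern used above.
# `render` is a stand-in for the blocking diffusers call, not the Space's
# real generate_image function.
import asyncio
import concurrent.futures


def render(prompt, name):
    # Placeholder for blocking, GPU/CPU-bound work, e.g. model(prompt=prompt, ...).
    return f"{name}: rendered '{prompt}'"


async def run_all(prompts):
    loop = asyncio.get_running_loop()
    with concurrent.futures.ProcessPoolExecutor() as pool:
        # Each call runs in its own worker process; the event loop stays free.
        tasks = [
            loop.run_in_executor(pool, render, prompt, f"Prompt {i}")
            for i, prompt in enumerate(prompts, start=1)
        ]
        return await asyncio.gather(*tasks)


if __name__ == "__main__":
    # The __main__ guard matters for the "spawn" start method (Windows/macOS).
    print(asyncio.run(run_all(["a castle at dusk", "a foggy forest"])))

Note that with a process pool, each worker is a separate Python process: the callable and its arguments must be picklable (one reason generate_image becomes a plain def in this commit), and module-level state such as the globally initialized model is re-imported or copied per worker rather than shared.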