Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -4,7 +4,7 @@ from generate_prompts import generate_prompt
|
|
4 |
from diffusers import AutoPipelineForText2Image
|
5 |
from io import BytesIO
|
6 |
import gradio as gr
|
7 |
-
from
|
8 |
|
9 |
# Load the model once outside of the function
|
10 |
print("Loading the model...")
|
@@ -35,7 +35,7 @@ def generate_image(prompt, prompt_name):
|
|
35 |
print(f"Error generating image for {prompt_name}: {e}")
|
36 |
return None
|
37 |
|
38 |
-
async def queue_api_calls(sentence_mapping, character_dict, selected_style):
|
39 |
print(f"queue_api_calls invoked with sentence_mapping: {sentence_mapping}, character_dict: {character_dict}, selected_style: {selected_style}")
|
40 |
prompts = []
|
41 |
|
@@ -47,13 +47,21 @@ async def queue_api_calls(sentence_mapping, character_dict, selected_style):
|
|
47 |
prompts.append((paragraph_number, prompt))
|
48 |
print(f"Generated prompt for paragraph {paragraph_number}: {prompt}")
|
49 |
|
50 |
-
#
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
55 |
|
56 |
-
images = {paragraph_number: response for (paragraph_number, _), response in zip(prompts, responses)}
|
57 |
print(f"Images generated: {images}")
|
58 |
return images
|
59 |
|
|
|
4 |
from diffusers import AutoPipelineForText2Image
|
5 |
from io import BytesIO
|
6 |
import gradio as gr
|
7 |
+
from concurrent.futures import ThreadPoolExecutor
|
8 |
|
9 |
# Load the model once outside of the function
|
10 |
print("Loading the model...")
|
|
|
35 |
print(f"Error generating image for {prompt_name}: {e}")
|
36 |
return None
|
37 |
|
38 |
+
async def queue_api_calls(sentence_mapping, character_dict, selected_style, batch_size=5):
|
39 |
print(f"queue_api_calls invoked with sentence_mapping: {sentence_mapping}, character_dict: {character_dict}, selected_style: {selected_style}")
|
40 |
prompts = []
|
41 |
|
|
|
47 |
prompts.append((paragraph_number, prompt))
|
48 |
print(f"Generated prompt for paragraph {paragraph_number}: {prompt}")
|
49 |
|
50 |
+
# Cap concurrent workers at batch_size (never more than the number of prompts).
# NOTE(review): if prompts is empty this yields max_workers == 0, and
# ThreadPoolExecutor(max_workers=0) raises ValueError — guard with max(1, ...)
# or return early when there are no prompts.
|
51 |
+
max_workers = min(batch_size, len(prompts))
|
52 |
+
|
53 |
+
# Generate images for each prompt in parallel using threading
|
54 |
+
images = {}
|
55 |
+
with ThreadPoolExecutor(max_workers=max_workers) as executor:
|
56 |
+
loop = asyncio.get_running_loop()
|
57 |
+
for i in range(0, len(prompts), batch_size):
|
58 |
+
batch_prompts = prompts[i:i+batch_size]
|
59 |
+
tasks = [loop.run_in_executor(executor, generate_image, prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in batch_prompts]
|
60 |
+
print("Tasks created for image generation.")
|
61 |
+
responses = await asyncio.gather(*tasks)
|
62 |
+
print("Responses received from image generation tasks.")
|
63 |
+
images.update({paragraph_number: response for (paragraph_number, _), response in zip(batch_prompts, responses)})
|
64 |
|
|
|
65 |
print(f"Images generated: {images}")
|
66 |
return images
|
67 |
|