RanM committed
Commit 8cc73b6 · verified · 1 Parent(s): dab19e6

Update app.py

Files changed (1): app.py (+14 -12)
app.py CHANGED
@@ -1,22 +1,20 @@
+import os
 import asyncio
 from generate_prompts import generate_prompt
 from diffusers import AutoPipelineForText2Image
 from io import BytesIO
 import gradio as gr
+import threading
 
-# Asynchronously load the model once outside of the function
-model = None
+# Load the model once outside of the function
+print("Loading the model...")
+model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
+print("Model loaded successfully.")
 
-async def load_model():
-    global model
-    print("Loading the model...")
-    model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
-    print("Model loaded successfully.")
+# Create a thread-local storage object
+thread_local = threading.local()
 
-# Run the model loading
-asyncio.run(load_model())
-
-async def generate_image(prompt, prompt_name):
+def generate_image(prompt, prompt_name):
     try:
         print(f"Generating response for {prompt_name} with prompt: {prompt}")
         output = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
@@ -53,7 +51,8 @@ async def queue_api_calls(sentence_mapping, character_dict, selected_style):
         print(f"Generated prompt for paragraph {paragraph_number}: {prompt}")
 
     # Generate images for each prompt in parallel
-    tasks = [generate_image(prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in prompts]
+    loop = asyncio.get_running_loop()
+    tasks = [loop.run_in_executor(None, generate_image, prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in prompts]
     print("Tasks created for image generation.")
     responses = await asyncio.gather(*tasks)
     print("Responses received from image generation tasks.")
@@ -73,6 +72,9 @@ def process_prompt(sentence_mapping, character_dict, selected_style):
     asyncio.set_event_loop(loop)
     print("Event loop created.")
 
+    # Initialize thread-local variables
+    thread_local.step = 0
+
     # This sends the prompts to function that sets up the async calls. Once all the calls to the API complete, it returns a list of the gr.Textbox with value= set.
     cmpt_return = loop.run_until_complete(queue_api_calls(sentence_mapping, character_dict, selected_style))
     print(f"process_prompt completed with return value: {cmpt_return}")