RanM committed
Commit e08a897 · verified · 1 Parent(s): 3fe9b2c

Update app.py

Files changed (1): app.py (+30 -27)
app.py CHANGED
@@ -4,39 +4,39 @@ from generate_prompts import generate_prompt
 from diffusers import AutoPipelineForText2Image
 from io import BytesIO
 import gradio as gr
+import threading
 
 # Load the model once outside of the function
 print("Loading the model...")
 model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
 print("Model loaded successfully.")
 
-# Create an asyncio lock
-lock = asyncio.Lock()
+# Create a thread-local storage object
+thread_local = threading.local()
 
-async def generate_image(prompt, prompt_name):
-    async with lock:
-        try:
-            print(f"Generating response for {prompt_name} with prompt: {prompt}")
-            output = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
-            print(f"Output for {prompt_name}: {output}")
+def generate_image(prompt, prompt_name):
+    try:
+        print(f"Generating response for {prompt_name} with prompt: {prompt}")
+        output = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
+        print(f"Output for {prompt_name}: {output}")
 
-            # Check if the model returned images
-            if isinstance(output.images, list) and len(output.images) > 0:
-                image = output.images[0]
-                buffered = BytesIO()
-                try:
-                    image.save(buffered, format="JPEG")
-                    image_bytes = buffered.getvalue()
-                    print(f"Image bytes length for {prompt_name}: {len(image_bytes)}")
-                    return image_bytes
-                except Exception as e:
-                    print(f"Error saving image for {prompt_name}: {e}")
-                    return None
-            else:
-                raise Exception(f"No images returned by the model for {prompt_name}.")
-        except Exception as e:
-            print(f"Error generating image for {prompt_name}: {e}")
-            return None
+        # Check if the model returned images
+        if isinstance(output.images, list) and len(output.images) > 0:
+            image = output.images[0]
+            buffered = BytesIO()
+            try:
+                image.save(buffered, format="JPEG")
+                image_bytes = buffered.getvalue()
+                print(f"Image bytes length for {prompt_name}: {len(image_bytes)}")
+                return image_bytes
+            except Exception as e:
+                print(f"Error saving image for {prompt_name}: {e}")
+                return None
+        else:
+            raise Exception(f"No images returned by the model for {prompt_name}.")
+    except Exception as e:
+        print(f"Error generating image for {prompt_name}: {e}")
+        return None
 
 async def queue_api_calls(sentence_mapping, character_dict, selected_style):
     print(f"queue_api_calls invoked with sentence_mapping: {sentence_mapping}, character_dict: {character_dict}, selected_style: {selected_style}")
@@ -46,13 +46,13 @@ async def queue_api_calls(sentence_mapping, character_dict, selected_style):
     for paragraph_number, sentences in sentence_mapping.items():
         combined_sentence = " ".join(sentences)
         print(f"combined_sentence for paragraph {paragraph_number}: {combined_sentence}")
-        prompt = generate_prompt(combined_sentence, sentence_mapping, character_dict, selected_style)
+        prompt = generate_prompt(combined_sentence, character_dict, selected_style)
         prompts.append((paragraph_number, prompt))
         print(f"Generated prompt for paragraph {paragraph_number}: {prompt}")
 
     # Generate images for each prompt in parallel
     loop = asyncio.get_running_loop()
-    tasks = [generate_image(prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in prompts]
+    tasks = [loop.run_in_executor(None, generate_image, prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in prompts]
     print("Tasks created for image generation.")
     responses = await asyncio.gather(*tasks)
     print("Responses received from image generation tasks.")
@@ -72,6 +72,9 @@ def process_prompt(sentence_mapping, character_dict, selected_style):
     asyncio.set_event_loop(loop)
     print("Event loop created.")
 
+    # Initialize thread-local variables
+    thread_local.step = 0
+
     # This sends the prompts to function that sets up the async calls. Once all the calls to the API complete, it returns a list of the gr.Textbox with value= set.
     cmpt_return = loop.run_until_complete(queue_api_calls(sentence_mapping, character_dict, selected_style))
     print(f"process_prompt completed with return value: {cmpt_return}")