RanM committed
Commit bdf16c0 · verified · 1 Parent(s): 5e2c7ed

Update app.py

Files changed (1)
  app.py +12 -27
app.py CHANGED
@@ -3,18 +3,11 @@ import asyncio
 from generate_prompts import generate_prompt
 from diffusers import AutoPipelineForText2Image
 from io import BytesIO
-import json
 import gradio as gr
 
 # Load the model once outside of the function
 model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
 
-
-prompt1 = "write a 5 paragraph explanation of how to use python async and await. Return a JSON structure as follows {'prompt_name': 'prompt1','response': '[response]'}"
-prompt2 = "write a 5 paragraph explanation of limitations for using asyncio.run(). Return a JSON structure as follows {'prompt_name': 'prompt2','response': '[response}'}"
-prompt3 = "write a 5 paragraph explanation of how to use asyncio.get_running_loop(). Return a JSON structure as follows {'prompt_name': 'prompt3','response': '[response]'}"
-prompt4 = "write a 5 paragraph explanation of how to use asyncio.gather(). Return a JSON structure as follows {'prompt_name': 'prompt4','response': '[response]'}"
-
 async def generate_image(prompt, prompt_name):
     try:
         print(f"Generating response for {prompt_name}")
@@ -41,52 +34,44 @@ async def generate_image(prompt, prompt_name):
 async def queue_api_calls(sentence_mapping, character_dict, selected_style):
     print(f'sentence_mapping: {sentence_mapping}, character_dict: {character_dict}, selected_style: {selected_style}')
     prompts = []
-
+
     # Generate prompts for each paragraph
     for paragraph_number, sentences in sentence_mapping.items():
         combined_sentence = " ".join(sentences)
         prompt = generate_prompt(combined_sentence, character_dict, selected_style)
         prompts.append((paragraph_number, prompt))
         print(f"Generated prompt for paragraph {paragraph_number}: {prompt}")
-
+
+    # Generate images for each prompt in parallel
     tasks = [generate_image(prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in prompts]
-    responses = await asyncio.gather(generate_image(*task))
+    responses = await asyncio.gather(*tasks)
 
-    #Note: Although the API calls get processed in async order, asyncio.gather and returns them in the request order
-    images = {}
-
-    # Iterate through each response
-    # Map results back to paragraphs
-    for i, (paragraph_number, _) in enumerate(prompts):
-        if i < len(results):
-            images[paragraph_number] = results[i]
-        else:
-            print(f"Error: No result for paragraph {paragraph_number}")
-
+    images = {paragraph_number: response for (paragraph_number, _), response in zip(prompts, responses)}
     return images
 
 def process_prompt(sentence_mapping, character_dict, selected_style):
     try:
-        #see if there is a loop already running. If there is, reuse it.
+        # See if there is a loop already running. If there is, reuse it.
        loop = asyncio.get_running_loop()
     except RuntimeError:
         # Create new event loop if one is not running
         loop = asyncio.new_event_loop()
         asyncio.set_event_loop(loop)
 
-    #this sends the prompts to function that sets up the async calls. Once all the calls to the API complete, it returns a list of the gr.Textbox with value= set.
+    # This sends the prompts to function that sets up the async calls. Once all the calls to the API complete, it returns a list of the gr.Textbox with value= set.
     cmpt_return = loop.run_until_complete(queue_api_calls(sentence_mapping, character_dict, selected_style))
     return cmpt_return
 
 # Gradio interface with high concurrency limit
 gradio_interface = gr.Interface(
-    fn=process_prompt,
+    fn=process_prompt,
     inputs=[
         gr.JSON(label="Sentence Mapping"),
         gr.JSON(label="Character Dict"),
         gr.Dropdown(["oil painting", "sketch", "watercolor"], label="Selected Style")
     ],
-    outputs="json")
-
+    outputs="json"
+)
+
 if __name__ == "__main__":
-    demo.launch()
+    gradio_interface.launch()
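
For reference, a minimal self-contained sketch of the gather-and-zip pattern used in queue_api_calls above. All names here (fake_generate_image, the sample prompts) are hypothetical stand-ins, not part of app.py; the sketch only illustrates that asyncio.gather returns results in the same order as the awaitables passed to it, so zipping against prompts maps each response back to its paragraph number.

import asyncio

async def fake_generate_image(prompt, prompt_name):
    # Hypothetical stand-in for the real image call; just echoes its inputs.
    await asyncio.sleep(0)
    return f"{prompt_name}: {prompt}"

async def main():
    prompts = [(1, "a cat, oil painting"), (2, "a dog, oil painting")]
    tasks = [fake_generate_image(prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in prompts]
    # gather preserves input order, so responses line up with prompts
    responses = await asyncio.gather(*tasks)
    images = {paragraph_number: response for (paragraph_number, _), response in zip(prompts, responses)}
    print(images)

if __name__ == "__main__":
    asyncio.run(main())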