RanM committed (verified)
Commit 3a80045 · 1 Parent(s): c14304d

Update app.py

Files changed (1): app.py (+6 -11)
app.py CHANGED
@@ -4,7 +4,7 @@ from generate_prompts import generate_prompt
 from diffusers import AutoPipelineForText2Image
 from io import BytesIO
 import gradio as gr
-from concurrent.futures import ProcessPoolExecutor
+from multiprocessing import Pool, cpu_count
 
 # Load the model once outside of the function
 print("Loading the model...")
@@ -43,19 +43,14 @@ async def queue_api_calls(sentence_mapping, character_dict, selected_style):
     for paragraph_number, sentences in sentence_mapping.items():
         combined_sentence = " ".join(sentences)
         print(f"combined_sentence for paragraph {paragraph_number}: {combined_sentence}")
-        prompt = generate_prompt(combined_sentence, character_dict, selected_style)
+        prompt = generate_prompt(combined_sentence, sentence_mapping, character_dict, selected_style)
         prompts.append((paragraph_number, prompt))
         print(f"Generated prompt for paragraph {paragraph_number}: {prompt}")
 
-    # Set max_workers to the total number of prompts
-    max_workers = len(prompts)
-
-    # Generate images for each prompt in parallel using multiprocessing
-    with ProcessPoolExecutor(max_workers=max_workers) as executor:
-        loop = asyncio.get_running_loop()
-        tasks = [loop.run_in_executor(executor, generate_image, prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in prompts]
-        print("Tasks created for image generation.")
-        responses = await asyncio.gather(*tasks)
+    # Use multiprocessing Pool to generate images in parallel
+    with Pool(cpu_count()) as pool:
+        tasks = [(prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in prompts]
+        responses = pool.starmap(generate_image, tasks)
     print("Responses received from image generation tasks.")
 
     images = {paragraph_number: response for (paragraph_number, _), response in zip(prompts, responses)}
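
For reference, a minimal, self-contained sketch of the pattern the new hunk adopts: fan the (prompt, label) tuples out over a multiprocessing Pool with starmap. The generate_image stub and the demo prompts below are placeholders standing in for the real implementations in app.py (where the diffusers pipeline is loaded at module import), so treat this as an illustration of the call pattern, not the app's actual code.

```python
import asyncio
from multiprocessing import Pool, cpu_count

# Placeholder for app.py's real generate_image: it only needs to accept a
# prompt string and a label, matching the (prompt, f"Prompt {n}") tuples
# built in queue_api_calls.
def generate_image(prompt, prompt_id):
    return f"<image for {prompt_id}: {prompt}>"

async def queue_api_calls(prompts):
    # prompts: list of (paragraph_number, prompt) pairs, as built in app.py.
    with Pool(cpu_count()) as pool:
        tasks = [(prompt, f"Prompt {n}") for n, prompt in prompts]
        # starmap unpacks each tuple into generate_image(prompt, prompt_id)
        # and blocks until every worker process has returned.
        responses = pool.starmap(generate_image, tasks)
    return {n: response for (n, _), response in zip(prompts, responses)}

if __name__ == "__main__":
    demo = [(1, "a castle at dawn"), (2, "a fox in the snow")]
    print(asyncio.run(queue_api_calls(demo)))
```

One trade-off worth noting: pool.starmap is a blocking call, so the coroutine no longer yields to the event loop while images render, whereas the removed ProcessPoolExecutor + loop.run_in_executor + asyncio.gather version awaited the work; the new code accepts that in exchange for fewer moving parts.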