RanM committed on
Commit
441106f
·
verified ·
1 Parent(s): a865152

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -8
app.py CHANGED
@@ -1,5 +1,6 @@
1
  import os
2
  import asyncio
 
3
  from generate_prompts import generate_prompt
4
  from diffusers import AutoPipelineForText2Image
5
  from io import BytesIO
@@ -16,10 +17,12 @@ class ModelActor:
16
  print("Model loaded successfully.")
17
 
18
  def generate_image(self, prompt, prompt_name):
 
 
19
  try:
20
- print(f"Generating response for {prompt_name} with prompt: {prompt}")
21
  output = self.model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
22
- print(f"Output for {prompt_name}: {output}")
23
 
24
  if isinstance(output.images, list) and len(output.images) > 0:
25
  image = output.images[0]
@@ -27,15 +30,17 @@ class ModelActor:
27
  try:
28
  image.save(buffered, format="JPEG")
29
  image_bytes = buffered.getvalue()
30
- print(f"Image bytes length for {prompt_name}: {len(image_bytes)}")
 
 
31
  return image_bytes
32
  except Exception as e:
33
- print(f"Error saving image for {prompt_name}: {e}")
34
  return None
35
  else:
36
- raise Exception(f"No images returned by the model for {prompt_name}.")
37
  except Exception as e:
38
- print(f"Error generating image for {prompt_name}: {e}")
39
  return None
40
 
41
  model_actor = ModelActor.remote()
@@ -54,8 +59,7 @@ async def queue_api_calls(sentence_mapping, character_dict, selected_style):
54
  tasks = [model_actor.generate_image.remote(prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in prompts]
55
  print("Tasks created for image generation.")
56
 
57
- # Use asyncio to gather the results asynchronously
58
- responses = await asyncio.gather(*[ray.get(task) for task in tasks])
59
  print("Responses received from image generation tasks.")
60
 
61
  images = {paragraph_number: response for (paragraph_number, _), response in zip(prompts, responses)}
 
1
  import os
2
  import asyncio
3
+ import time
4
  from generate_prompts import generate_prompt
5
  from diffusers import AutoPipelineForText2Image
6
  from io import BytesIO
 
17
  print("Model loaded successfully.")
18
 
19
  def generate_image(self, prompt, prompt_name):
20
+ start_time = time.time()
21
+ process_id = os.getpid()
22
  try:
23
+ print(f"[{process_id}] Generating response for {prompt_name} with prompt: {prompt}")
24
  output = self.model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
25
+ print(f"[{process_id}] Output for {prompt_name}: {output}")
26
 
27
  if isinstance(output.images, list) and len(output.images) > 0:
28
  image = output.images[0]
 
30
  try:
31
  image.save(buffered, format="JPEG")
32
  image_bytes = buffered.getvalue()
33
+ end_time = time.time()
34
+ print(f"[{process_id}] Image bytes length for {prompt_name}: {len(image_bytes)}")
35
+ print(f"[{process_id}] Time taken for {prompt_name}: {end_time - start_time} seconds")
36
  return image_bytes
37
  except Exception as e:
38
+ print(f"[{process_id}] Error saving image for {prompt_name}: {e}")
39
  return None
40
  else:
41
+ raise Exception(f"[{process_id}] No images returned by the model for {prompt_name}.")
42
  except Exception as e:
43
+ print(f"[{process_id}] Error generating image for {prompt_name}: {e}")
44
  return None
45
 
46
  model_actor = ModelActor.remote()
 
59
  tasks = [model_actor.generate_image.remote(prompt, f"Prompt {paragraph_number}") for paragraph_number, prompt in prompts]
60
  print("Tasks created for image generation.")
61
 
62
+ responses = await asyncio.gather(*[asyncio.to_thread(ray.get, task) for task in tasks])
 
63
  print("Responses received from image generation tasks.")
64
 
65
  images = {paragraph_number: response for (paragraph_number, _), response in zip(prompts, responses)}