RanM committed on
Commit
b5ad13a
·
verified ·
1 Parent(s): cd715cb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -12
app.py CHANGED
@@ -4,36 +4,48 @@ from diffusers import AutoPipelineForText2Image
4
  import base64
5
  from io import BytesIO
6
  from generate_propmts import generate_prompt
 
 
7
  # Load the model once outside of the function
8
  model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
9
 
10
def generate_image(text, sentence_mapping, character_dict, selected_style):
    """Render *text* to an image and return it as a base64-encoded JPEG string.

    Args:
        text: Sentence(s) to illustrate.
        sentence_mapping: Paragraph-number -> sentences mapping passed through
            to the prompt builder.
        character_dict: Character descriptions passed through to the prompt builder.
        selected_style: Art-style selector passed through to the prompt builder.

    Returns:
        The base64 string of the JPEG, or None if generation fails.
    """
    try:
        # generate_prompt returns a (prompt, extra) tuple; only the prompt is used.
        prompt, _ = generate_prompt(text, sentence_mapping, character_dict, selected_style)
        image = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
        buffered = BytesIO()
        # Bug fix: the image was never written into the buffer, so the encoded
        # string was always empty; `result` in the old check was also undefined.
        image.save(buffered, format="JPEG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except Exception as e:
        # Log the failure instead of swallowing it silently; callers treat
        # None as "no image for this paragraph".
        print(f"Error generating image: {e}")
        return None
21
 
22
def inference(text, sentence_mapping, character_dict, selected_style):
    """Generate one image per paragraph, concurrently.

    Args:
        text: Raw input text (forwarded for context; per-paragraph sentences
            come from sentence_mapping).
        sentence_mapping: Dict mapping paragraph numbers to lists of sentences.
        character_dict: Character descriptions forwarded to generate_image.
        selected_style: Art style forwarded to generate_image.

    Returns:
        Dict mapping paragraph number -> base64 JPEG string (or None on failure).
    """
    # Local import: the visible top-of-file imports do not include this module.
    from concurrent.futures import ThreadPoolExecutor

    images = {}
    grouped_sentences = sentence_mapping
    with ThreadPoolExecutor() as executor:
        # Submit every job first so all paragraphs render in parallel...
        futures = {}
        for paragraph_number, sentences in grouped_sentences.items():
            combined_sentence = " ".join(sentences)
            futures[paragraph_number] = executor.submit(
                generate_image,
                combined_sentence,
                sentence_mapping,
                character_dict,
                selected_style,
            )
        # ...then resolve them. Bug fix: the old code returned raw Future
        # objects; result() yields the actual base64 strings. generate_image
        # catches its own exceptions and returns None rather than raising.
        for paragraph_number, future in futures.items():
            images[paragraph_number] = future.result()
    return images


gradio_interface = gr.Interface(
    fn=inference,
    # Bug fix: inference takes four arguments, so a single "text" input
    # could never call it; wire up all four.
    inputs=[
        gr.inputs.Textbox(label="Text"),
        gr.inputs.Textbox(label="Sentence Mapping"),
        gr.inputs.Textbox(label="Character Dict"),
        gr.inputs.Dropdown(["Style 1", "Style 2", "Style 3"], label="Selected Style"),
    ],
    outputs="json",  # a dict of base64 strings serializes cleanly as JSON
)
38
 
39
  if __name__ == "__main__":
 
4
  import base64
5
  from io import BytesIO
6
  from generate_propmts import generate_prompt
7
+ from concurrent.futures import ThreadPoolExecutor
8
+
9
  # Load the model once outside of the function
10
  model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
11
 
12
def generate_image(text, sentence_mapping, character_dict, selected_style):
    """Turn *text* into a rendered image, returned as a base64 JPEG string.

    Returns None (after logging the error) when prompt construction or
    model inference fails.
    """
    try:
        # Build the diffusion prompt; the second tuple element is unused here.
        prompt, _ = generate_prompt(text, sentence_mapping, character_dict, selected_style)
        rendered = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
        payload = BytesIO()
        rendered.save(payload, format="JPEG")
        encoded = base64.b64encode(payload.getvalue())
        return encoded.decode("utf-8")
    except Exception as e:
        print(f"Error generating image: {e}")
        return None
23
 
24
def inference(text, sentence_mapping, character_dict, selected_style):
    """Generate one image per paragraph, concurrently.

    Args:
        text: Raw input text (unused directly; per-paragraph sentences come
            from sentence_mapping).
        sentence_mapping: Dict of paragraph number -> list of sentences, or a
            JSON string encoding such a dict (the Gradio Textbox sends text).
        character_dict: Character descriptions (dict or JSON string) forwarded
            to generate_image.
        selected_style: Art style forwarded to generate_image.

    Returns:
        Dict mapping paragraph number -> base64 JPEG string (or None on failure).
    """
    import json  # local: not present in the visible top-of-file imports

    # Robustness fix: the Gradio Textbox inputs deliver plain strings, on
    # which .items() would raise AttributeError. Accept JSON text as well as
    # ready-made dicts so both the UI and direct callers work.
    if isinstance(sentence_mapping, str):
        sentence_mapping = json.loads(sentence_mapping)
    if isinstance(character_dict, str):
        character_dict = json.loads(character_dict)

    images = {}
    grouped_sentences = sentence_mapping
    with ThreadPoolExecutor() as executor:
        # Submit all jobs first so the renders overlap...
        futures = {}
        for paragraph_number, sentences in grouped_sentences.items():
            combined_sentence = " ".join(sentences)
            futures[paragraph_number] = executor.submit(
                generate_image, combined_sentence, sentence_mapping, character_dict, selected_style
            )
        # ...then collect the results in paragraph order.
        for paragraph_number, future in futures.items():
            images[paragraph_number] = future.result()
    return images
39
 
40
# Gradio UI wiring: three text fields plus a style selector in,
# a JSON dictionary of base64 image strings out.
gradio_interface = gr.Interface(
    fn=inference,
    inputs=[
        gr.inputs.Textbox(label="Text"),
        gr.inputs.Textbox(label="Sentence Mapping"),
        gr.inputs.Textbox(label="Character Dict"),
        gr.inputs.Dropdown(["Style 1", "Style 2", "Style 3"], label="Selected Style"),
    ],
    outputs="json",  # Return the dictionary of images
)
50
 
51
  if __name__ == "__main__":