RanM committed (verified)
Commit c301a62 · 1 Parent(s): 1d0b035

Update app.py

Files changed (1)
  1. app.py +22 -5
app.py CHANGED
@@ -9,16 +9,33 @@ import json
 # Load the model once outside of the function
 model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
 
+# Helper function to truncate prompt to fit the model's maximum sequence length
+def truncate_prompt(prompt, max_length=77):
+    tokens = prompt.split()
+    if len(tokens) > max_length:
+        return ' '.join(tokens[:max_length])
+    return prompt
+
 def generate_image(text, sentence_mapping, character_dict, selected_style):
     try:
         prompt, _ = generate_prompt(text, sentence_mapping, character_dict, selected_style)
         print(f"Generated prompt: {prompt}")
+
+        # Truncate prompt if necessary
+        prompt = truncate_prompt(prompt)
         output = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
         print(f"Model output: {output}")
-        image = output.images[0]
-        buffered = BytesIO()
-        image_bytes = buffered.getvalue()
-        return image_bytes
+
+        # Check if the model returned images
+        if output.images:
+            image = output.images[0]
+            buffered = BytesIO()
+            image.save(buffered, format="JPEG")
+            image_bytes = buffered.getvalue()
+            return image_bytes
+        else:
+            raise Exception("No images returned by the model.")
+
     except Exception as e:
         print(f"Error generating image: {e}")
         return None
@@ -47,7 +64,7 @@ gradio_interface = gr.Interface(
         gr.JSON(label="Character Dict"),
         gr.Dropdown(["Style 1", "Style 2", "Style 3"], label="Selected Style")
     ],
-    outputs="json"
+    outputs="json"
 )
 
 if __name__ == "__main__":
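
Note on the new helper: truncate_prompt counts whitespace-separated words, while the 77-token limit it targets is defined by the pipeline's CLIP tokenizer, so a word-level cut can still overflow (or trim more than necessary). Below is a minimal sketch of a token-aware variant, not part of this commit; it assumes the SDXL pipeline exposes its CLIP tokenizer as model.tokenizer (true for recent diffusers releases), and the name truncate_prompt_tokens is illustrative only.

from diffusers import AutoPipelineForText2Image

# Same load as at the top of app.py.
model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")

def truncate_prompt_tokens(prompt: str) -> str:
    # model.tokenizer is the first CLIP tokenizer bundled with the SDXL pipeline.
    tokenizer = model.tokenizer
    max_len = tokenizer.model_max_length  # 77 tokens for the CLIP text encoders
    # Encode with truncation so the prompt fits the text encoder's context window,
    # then decode back to a plain string without the special tokens.
    ids = tokenizer(prompt, truncation=True, max_length=max_len).input_ids
    return tokenizer.decode(ids, skip_special_tokens=True)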
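
For callers of generate_image, a short usage sketch (assuming Pillow is installed; the argument values are placeholders, not values from this commit) showing how the returned JPEG bytes can be turned back into an image:

from io import BytesIO
from PIL import Image

# Placeholder inputs; real values come from the Gradio interface fields.
image_bytes = generate_image("A foggy harbor at dawn", {}, {}, "Style 1")
if image_bytes is not None:
    # Rebuild a PIL image from the JPEG bytes returned by generate_image.
    image = Image.open(BytesIO(image_bytes))
    image.show()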