RanM committed on
Commit
8a555c8
·
verified ·
1 Parent(s): c301a62

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -14,18 +14,20 @@ def truncate_prompt(prompt, max_length=77):
14
  tokens = prompt.split()
15
  if len(tokens) > max_length:
16
  return ' '.join(tokens[:max_length])
 
 
17
  return prompt
18
 
19
  def generate_image(text, sentence_mapping, character_dict, selected_style):
20
  try:
21
  prompt, _ = generate_prompt(text, sentence_mapping, character_dict, selected_style)
22
  print(f"Generated prompt: {prompt}")
23
-
24
  # Truncate prompt if necessary
25
  prompt = truncate_prompt(prompt)
 
26
  output = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
27
  print(f"Model output: {output}")
28
-
29
  # Check if the model returned images
30
  if output.images:
31
  image = output.images[0]
 
def truncate_prompt(prompt, max_length=77):
    """Truncate *prompt* to at most *max_length* whitespace-separated words.

    NOTE(review): the 77 default matches CLIP's context limit, but CLIP
    counts tokenizer tokens, not whitespace words — this is only an
    approximation; confirm against the model's tokenizer.

    Args:
        prompt: The prompt string to (possibly) truncate.
        max_length: Maximum number of whitespace-separated words to keep.

    Returns:
        The first *max_length* words joined by single spaces when the
        prompt is too long; otherwise the original prompt unchanged.
    """
    tokens = prompt.split()
    if len(tokens) > max_length:
        return ' '.join(tokens[:max_length])
    # Debug output for the un-truncated path.
    print("len of tokens:", len(tokens))
    # Fixed copy-paste bug: this label previously said "len of tokens"
    # while actually printing the character count of the prompt.
    print("len of prompt:", len(prompt))
    return prompt
20
 
21
  def generate_image(text, sentence_mapping, character_dict, selected_style):
22
  try:
23
  prompt, _ = generate_prompt(text, sentence_mapping, character_dict, selected_style)
24
  print(f"Generated prompt: {prompt}")
 
25
  # Truncate prompt if necessary
26
  prompt = truncate_prompt(prompt)
27
+ print(f"truncate_prompt: {prompt}")
28
  output = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
29
  print(f"Model output: {output}")
30
+ print("len of output:", len(output))
31
  # Check if the model returned images
32
  if output.images:
33
  image = output.images[0]