RanM committed on
Commit 3486e1a · verified · 1 Parent(s): c0cd59a

Update app.py

Files changed (1)
  1. app.py +14 -9
app.py CHANGED
@@ -1,3 +1,4 @@
+import json
 import gradio as gr
 import torch
 from diffusers import AutoPipelineForText2Image
@@ -14,10 +15,9 @@ def generate_image(text, sentence_mapping, character_dict, selected_style):
         prompt, _ = generate_prompt(text, sentence_mapping, character_dict, selected_style)
         image = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
         buffered = BytesIO()
+        buffered.write(image.tobytes())
         img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
-        if isinstance(result, img_str):
-            image_bytes = base64.b64decode(result)
-            return image_bytes
+        return img_str
     except Exception as e:
         print(f"Error generating image: {e}")
         return None
@@ -25,14 +25,19 @@ def generate_image(text, sentence_mapping, character_dict, selected_style):
 def inference(sentence_mapping, character_dict, selected_style):
     images = {}
     print(f'sentence_mapping:{sentence_mapping}, character_dict:{character_dict}, selected_style:{selected_style}')
-    # Here we assume `sentence_mapping` is a dictionary where keys are paragraph numbers and values are lists of sentences
-    grouped_sentences = sentence_mapping
+
+    # Parse sentence_mapping JSON string into a dictionary
+    try:
+        grouped_sentences = json.loads(sentence_mapping)
+    except json.JSONDecodeError as e:
+        print(f"Error parsing JSON: {e}")
+        return {"error": "Invalid JSON input for sentence_mapping"}
 
     with ThreadPoolExecutor() as executor:
         futures = {}
         for paragraph_number, sentences in grouped_sentences.items():
             combined_sentence = " ".join(sentences)
-            futures[paragraph_number] = executor.submit(generate_image, combined_sentence, sentence_mapping, character_dict, selected_style)
+            futures[paragraph_number] = executor.submit(generate_image, combined_sentence, grouped_sentences, character_dict, selected_style)
 
     for paragraph_number, future in futures.items():
         images[paragraph_number] = future.result()
@@ -42,11 +47,11 @@ def inference(sentence_mapping, character_dict, selected_style):
 gradio_interface = gr.Interface(
     fn=inference,
     inputs=[
-        gr.Textbox(label="Sentence Mapping"),
-        gr.Textbox(label="Character Dict"),
+        gr.Textbox(label="Sentence Mapping (JSON)"),
+        gr.Textbox(label="Character Dict (JSON)"),
         gr.Dropdown(["Style 1", "Style 2", "Style 3"], label="Selected Style")
     ],
-    outputs="text"
+    outputs="text"
 )
 
 if __name__ == "__main__":
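
For reference, below is a minimal sketch (not part of the commit) of how the updated generate_image helper could serialize its output, assuming `model` is the AutoPipelineForText2Image pipeline and `generate_prompt` is the prompt builder already defined in app.py. The commit base64-encodes raw `image.tobytes()`, which a consumer can only rebuild into an image if it also knows the width, height, and mode; the sketch instead saves the PIL image as PNG into the buffer so the base64 string is a self-describing file. The `example_mapping` value at the end is a made-up illustration of the JSON shape the new inference function expects in the "Sentence Mapping (JSON)" textbox.

import base64
import json
from io import BytesIO

def generate_image_png_sketch(text, sentence_mapping, character_dict, selected_style):
    # Hypothetical variant of generate_image: same prompt and pipeline calls as the commit,
    # but the image is written into the buffer as PNG so the base64 string decodes to a file.
    try:
        prompt, _ = generate_prompt(text, sentence_mapping, character_dict, selected_style)  # defined in app.py
        image = model(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]    # PIL.Image
        buffered = BytesIO()
        image.save(buffered, format="PNG")  # encode as PNG instead of raw tobytes()
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except Exception as e:
        print(f"Error generating image: {e}")
        return None

# Illustrative JSON for the "Sentence Mapping (JSON)" textbox: paragraph numbers mapped
# to lists of sentences (values are made up).
example_mapping = json.dumps({
    "1": ["A knight rides toward the castle at dawn.", "The gates swing open."],
    "2": ["A dragon circles the highest tower."]
})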