BryanW committed
Commit 6cd3a4b · verified · 1 Parent(s): ccfc2d7

Update app.py

Files changed (1): app.py (+10 -10)
app.py CHANGED
@@ -49,7 +49,7 @@ def get_transform(resolution):
 
 # Image-to-Text Function
 @spaces.GPU
-def image_to_text(image, prompt, resolution=1024, steps=64, cfg=9.0):
+def image_to_text(image, prompt, seed=42, steps=64, cfg=9.0):
     try:
         transform = get_transform(resolution)
 
@@ -69,7 +69,7 @@ def image_to_text(image, prompt, resolution=1024, steps=64, cfg=9.0):
             guidance_scale=cfg,
             num_inference_steps=steps,
             mask_token_embedding="./mask_token_embedding.pth",
-            generator=torch.manual_seed(42),
+            generator=torch.manual_seed(seed),
         )
 
         return output.prompts[0]
@@ -79,7 +79,7 @@ def image_to_text(image, prompt, resolution=1024, steps=64, cfg=9.0):
 
 # Text-to-Image Function
 @spaces.GPU
-def text_to_image(prompt, negative_prompt, num_images=1, resolution=1024, steps=64, cfg=9.0):
+def text_to_image(prompt, negative_prompt, num_images=1, seed=42, steps=64, cfg=9.0):
     try:
         negative_prompt = negative_prompt or "worst quality, low quality, low res, blurry, distortion, watermark, logo, signature, text, jpeg artifacts, signature, sketch, duplicate, ugly, identifying mark"
 
@@ -91,7 +91,7 @@ def text_to_image(prompt, negative_prompt, num_images=1, resolution=1024, steps=
             guidance_scale=cfg,
             num_inference_steps=steps,
             mask_token_embedding="./mask_token_embedding.pth",
-            generator=torch.manual_seed(42),
+            generator=torch.manual_seed(seed),
         )
 
         return output.images
@@ -112,7 +112,7 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Muddit Unifined Model") as demo:
             i2t_prompt_input = gr.Textbox(label="Prompt", value="Please describe this image.", placeholder="Enter your prompt here...")
 
             with gr.Accordion("Advanced Settings", open=False):
-                i2t_resolution = gr.Slider(label="Resolution", minimum=256, maximum=1024, value=1024, step=64)
+                seed = gr.Slider(label="Seed", minimum=0, maximum=2**32 - 1, step=1, value=42)
                 i2t_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, value=64, step=1)
                 i2t_cfg = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, value=9.0, step=0.5)
 
@@ -141,7 +141,7 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Muddit Unifined Model") as demo:
             vqa_prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your question here...")
 
             with gr.Accordion("Advanced Settings", open=False):
-                vqa_resolution = gr.Slider(label="Resolution", minimum=256, maximum=1024, value=1024, step=64)
+                seed = gr.Slider(label="Seed", minimum=0, maximum=2**32 - 1, step=1, value=42)
                 vqa_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, value=64, step=1)
                 vqa_cfg = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, value=9.0, step=0.5)
 
@@ -174,7 +174,7 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Muddit Unifined Model") as demo:
             t2i_num_images = gr.Slider(label="Number of Images", minimum=1, maximum=4, value=1, step=1)
 
             with gr.Accordion("Advanced Settings", open=False):
-                t2i_resolution = gr.Slider(label="Resolution", minimum=256, maximum=1024, value=1024, step=64)
+                seed = gr.Slider(label="Seed", minimum=0, maximum=2**32 - 1, step=1, value=42)
                 t2i_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, value=64, step=1)
                 t2i_cfg = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, value=9.0, step=0.5)
 
@@ -199,19 +199,19 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Muddit Unifined Model") as demo:
     # Event handlers
     i2t_submit_btn.click(
         fn=image_to_text,
-        inputs=[i2t_image_input, i2t_prompt_input, i2t_resolution, i2t_steps, i2t_cfg],
+        inputs=[i2t_image_input, i2t_prompt_input, seed, i2t_steps, i2t_cfg],
         outputs=i2t_output_text
     )
 
     vqa_submit_btn.click(
         fn=image_to_text,
-        inputs=[vqa_image_input, vqa_prompt_input, vqa_resolution, vqa_steps, vqa_cfg],
+        inputs=[vqa_image_input, vqa_prompt_input, seed, vqa_steps, vqa_cfg],
         outputs=vqa_output_text
     )
 
     t2i_submit_btn.click(
         fn=text_to_image,
-        inputs=[t2i_prompt_input, t2i_negative_prompt, t2i_num_images, t2i_resolution, t2i_steps, t2i_cfg],
+        inputs=[t2i_prompt_input, t2i_negative_prompt, t2i_num_images, seed, t2i_steps, t2i_cfg],
         outputs=t2i_gallery
     )
 
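
For reference, below is a minimal, self-contained sketch of how the new seed control can be wired; it is not the Space's actual code, and the names RESOLUTION and i2t_seed, plus the stubbed handler body, are illustrative assumptions. It uses an explicit torch.Generator rather than the committed generator=torch.manual_seed(seed) (which seeds and returns the global CPU generator), gives the tab its own slider variable instead of reusing a single seed name, and models the resolution that get_transform() still expects as a module-level constant, since the updated signatures no longer accept one.

import gradio as gr
import torch

# Assumed module-level default: the updated signatures no longer take a
# resolution argument, so the unchanged get_transform(resolution) call in
# app.py has to resolve to a name defined elsewhere, modeled here as a constant.
RESOLUTION = 1024


def image_to_text(image, prompt, seed=42, steps=64, cfg=9.0):
    # Equivalent in effect to the committed generator=torch.manual_seed(seed),
    # which seeds and returns the global CPU generator, but this keeps the
    # seeded generator local instead of mutating global RNG state.
    generator = torch.Generator().manual_seed(int(seed))
    # The real app would pass image, prompt, generator, steps, cfg and the
    # resolution to its pipeline here; this stub only echoes the settings.
    return f"seed={int(seed)}, steps={int(steps)}, cfg={cfg}, resolution={RESOLUTION}, generator_seed={generator.initial_seed()}"


with gr.Blocks() as demo:
    with gr.Tab("Image to Text"):
        i2t_image_input = gr.Image(type="pil")
        i2t_prompt_input = gr.Textbox(label="Prompt", value="Please describe this image.")
        with gr.Accordion("Advanced Settings", open=False):
            # A per-tab variable (i2t_seed) keeps this handler tied to this
            # tab's slider; reassigning one `seed` name in every tab leaves
            # all handlers reading whichever slider was created last.
            i2t_seed = gr.Slider(label="Seed", minimum=0, maximum=2**32 - 1, step=1, value=42)
            i2t_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, value=64, step=1)
            i2t_cfg = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, value=9.0, step=0.5)
        i2t_output_text = gr.Textbox(label="Output")
        i2t_submit_btn = gr.Button("Run")

    i2t_submit_btn.click(
        fn=image_to_text,
        inputs=[i2t_image_input, i2t_prompt_input, i2t_seed, i2t_steps, i2t_cfg],
        outputs=i2t_output_text,
    )

if __name__ == "__main__":
    demo.launch()

In the committed version a single seed variable is reassigned in all three accordions, so every .click() handler reads whichever Seed slider was created last; per-tab names such as i2t_seed, vqa_seed, and t2i_seed keep each tab's control wired to its own handler.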