barakmeiri committed on
Commit
cb0c1d5
·
verified ·
1 Parent(s): e4eecf5

Remove image trigger

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -113,7 +113,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
113
  info="Enter your image description ",
114
  show_label=False,
115
  max_lines=1,
116
- placeholder="a cake on a table",
117
  container=False,
118
  )
119
 
@@ -177,7 +177,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
177
  info="Enter your edit prompt",
178
  show_label=False,
179
  max_lines=1,
180
- placeholder="an oreo cake on a table",
181
  container=False,
182
  )
183
 
@@ -193,10 +193,10 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
193
 
194
  gr.Markdown(f"""Disclaimer: Performance may be inferior to the reported in the paper due to hardware limitation.
195
  """)
196
- input_image.change(set_pipe,
197
- inputs=[input_image, description_prompt, edit_guidance_scale, num_inference_steps,
198
- num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
199
- outputs=[inv_state, is_set_text], trigger_mode='once')
200
 
201
  description_prompt.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
202
  num_inference_steps,
 
113
  info="Enter your image description ",
114
  show_label=False,
115
  max_lines=1,
116
+ placeholder="Example: a cake on a table",
117
  container=False,
118
  )
119
 
 
177
  info="Enter your edit prompt",
178
  show_label=False,
179
  max_lines=1,
180
+ placeholder="Example: an oreo cake on a table",
181
  container=False,
182
  )
183
 
 
193
 
194
  gr.Markdown(f"""Disclaimer: Performance may be inferior to the reported in the paper due to hardware limitation.
195
  """)
196
+ # input_image.change(set_pipe,
197
+ # inputs=[input_image, description_prompt, edit_guidance_scale, num_inference_steps,
198
+ # num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
199
+ # outputs=[inv_state, is_set_text], trigger_mode='once')
200
 
201
  description_prompt.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
202
  num_inference_steps,