set_pipe , trigger_mode='once'
app.py CHANGED
@@ -209,44 +209,44 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     input_image.change(set_pipe,
                        inputs=[input_image, description_prompt, edit_guidance_scale, num_inference_steps,
                                num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
-                       outputs=[inv_state, is_set_text])
+                       outputs=[inv_state, is_set_text], trigger_mode='once')
 
     description_prompt.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                                 num_inference_steps,
                                                 num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                 rnri_lr],
-                              outputs=[inv_state, is_set_text])
+                              outputs=[inv_state, is_set_text], trigger_mode='once')
 
     edit_guidance_scale.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                                  num_inference_steps,
                                                  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                  rnri_lr],
-                               outputs=[inv_state, is_set_text])
+                               outputs=[inv_state, is_set_text], trigger_mode='once')
     num_inference_steps.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                                  num_inference_steps,
                                                  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                  rnri_lr],
-                               outputs=[inv_state, is_set_text])
+                               outputs=[inv_state, is_set_text], trigger_mode='once')
     inversion_max_step.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                                 num_inference_steps,
                                                 num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                 rnri_lr],
-                              outputs=[inv_state, is_set_text])
+                              outputs=[inv_state, is_set_text], trigger_mode='once')
     rnri_iterations.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                              num_inference_steps,
                                              num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                              rnri_lr],
-                           outputs=[inv_state, is_set_text])
+                           outputs=[inv_state, is_set_text], trigger_mode='once')
     rnri_alpha.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                         num_inference_steps,
                                         num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                         rnri_lr],
-                      outputs=[inv_state, is_set_text])
+                      outputs=[inv_state, is_set_text], trigger_mode='once')
     rnri_lr.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                      num_inference_steps,
                                      num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                      rnri_lr],
-                   outputs=[inv_state, is_set_text])
+                   outputs=[inv_state, is_set_text], trigger_mode='once')
 
     # set_button.click(
     #     fn=set_pipe,
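For reference, trigger_mode is a standard keyword on Gradio event listeners: setting it to 'once' ignores new trigger events for that listener while a previous run is still pending, so a slider or textbox that fires .change() repeatedly does not stack up extra runs of an expensive handler such as set_pipe here. A minimal, self-contained sketch of the same pattern, assuming a recent Gradio 4.x release; the handler and component names below are illustrative, not taken from this Space:

# Minimal sketch of trigger_mode='once' on a .change() listener.
import time
import gradio as gr

def slow_setup(text):
    time.sleep(5)                      # stand-in for an expensive pipeline/inversion setup
    return f"configured for: {text!r}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    status = gr.Textbox(label="Status")
    # With trigger_mode='once', .change() events that fire while slow_setup
    # is still running are dropped instead of being queued behind it.
    prompt.change(slow_setup, inputs=[prompt], outputs=[status],
                  trigger_mode='once')

demo.launch()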