Barak1 committed on
Commit
97ef95f
·
1 Parent(s): 474bb95
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -90,7 +90,7 @@ with gr.Blocks(css="style.css") as demo:
90
 
91
 
92
  @spaces.GPU
93
- def set_pipe(inversion_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
94
  num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):
95
 
96
  if input_image is None or not description_prompt:
@@ -135,7 +135,7 @@ with gr.Blocks(css="style.css") as demo:
135
 
136
 
137
  @spaces.GPU
138
- def edit(inversion_state, pipe_inference, target_prompt):
139
  if inversion_state is None:
140
  raise gr.Error("Set inputs before editing.")
141
  # if device == "cuda":
@@ -243,42 +243,42 @@ with gr.Blocks(css="style.css") as demo:
243
  gr.Markdown(f"""Disclaimer: Performance may be inferior to the reported in the paper due to hardware limitation.
244
  """)
245
  input_image.change(set_pipe,
246
- inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale, num_inference_steps,
247
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
248
  outputs=[editor_state, is_set_text])
249
 
250
- description_prompt.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
251
  num_inference_steps,
252
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
253
  rnri_lr],
254
  outputs=[editor_state, is_set_text])
255
 
256
- edit_guidance_scale.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
257
  num_inference_steps,
258
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
259
  rnri_lr],
260
  outputs=[editor_state, is_set_text])
261
- num_inference_steps.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
262
  num_inference_steps,
263
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
264
  rnri_lr],
265
  outputs=[editor_state, is_set_text])
266
- inversion_max_step.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
267
  num_inference_steps,
268
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
269
  rnri_lr],
270
  outputs=[editor_state, is_set_text])
271
- rnri_iterations.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
272
  num_inference_steps,
273
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
274
  rnri_lr],
275
  outputs=[editor_state, is_set_text])
276
- rnri_alpha.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
277
  num_inference_steps,
278
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
279
  rnri_lr],
280
  outputs=[editor_state, is_set_text])
281
- rnri_lr.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
282
  num_inference_steps,
283
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
284
  rnri_lr],
@@ -293,7 +293,7 @@ with gr.Blocks(css="style.css") as demo:
293
 
294
  run_button.click(
295
  fn=edit,
296
- inputs=[editor_state, pipe_inference, target_prompt],
297
  outputs=[result]
298
  )
299
 
 
90
 
91
 
92
  @spaces.GPU
93
+ def set_pipe(inversion_state, input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
94
  num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):
95
 
96
  if input_image is None or not description_prompt:
 
135
 
136
 
137
  @spaces.GPU
138
+ def edit(inversion_state, target_prompt):
139
  if inversion_state is None:
140
  raise gr.Error("Set inputs before editing.")
141
  # if device == "cuda":
 
243
  gr.Markdown(f"""Disclaimer: Performance may be inferior to the reported in the paper due to hardware limitation.
244
  """)
245
  input_image.change(set_pipe,
246
+ inputs=[editor_state, input_image, description_prompt, edit_guidance_scale, num_inference_steps,
247
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
248
  outputs=[editor_state, is_set_text])
249
 
250
+ description_prompt.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
251
  num_inference_steps,
252
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
253
  rnri_lr],
254
  outputs=[editor_state, is_set_text])
255
 
256
+ edit_guidance_scale.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
257
  num_inference_steps,
258
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
259
  rnri_lr],
260
  outputs=[editor_state, is_set_text])
261
+ num_inference_steps.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
262
  num_inference_steps,
263
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
264
  rnri_lr],
265
  outputs=[editor_state, is_set_text])
266
+ inversion_max_step.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
267
  num_inference_steps,
268
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
269
  rnri_lr],
270
  outputs=[editor_state, is_set_text])
271
+ rnri_iterations.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
272
  num_inference_steps,
273
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
274
  rnri_lr],
275
  outputs=[editor_state, is_set_text])
276
+ rnri_alpha.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
277
  num_inference_steps,
278
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
279
  rnri_lr],
280
  outputs=[editor_state, is_set_text])
281
+ rnri_lr.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
282
  num_inference_steps,
283
  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
284
  rnri_lr],
 
293
 
294
  run_button.click(
295
  fn=edit,
296
+ inputs=[editor_state, target_prompt],
297
  outputs=[result]
298
  )
299