minor bug fix
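Pass the pipelines explicitly: pipe_inversion is added to the set_pipe signature and to each .change event's inputs list, and pipe_inference is added to edit and to run_button.click's inputs.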
app.py CHANGED
@@ -90,7 +90,7 @@ with gr.Blocks(css="style.css") as demo:
 
 
     @spaces.GPU
-    def set_pipe(inversion_state, input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
+    def set_pipe(inversion_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
                  num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):
 
         if input_image is None or not description_prompt:
@@ -135,7 +135,7 @@ with gr.Blocks(css="style.css") as demo:
 
 
     @spaces.GPU
-    def edit(inversion_state, target_prompt):
+    def edit(inversion_state, pipe_inference, target_prompt):
         if inversion_state is None:
             raise gr.Error("Set inputs before editing.")
         # if device == "cuda":
@@ -245,42 +245,42 @@ with gr.Blocks(css="style.css") as demo:
         gr.Markdown(f"""Disclaimer: Performance may be inferior to the reported in the paper due to hardware limitation.
                     """)
         input_image.change(set_pipe,
-                           inputs=[editor_state, input_image, description_prompt, edit_guidance_scale, num_inference_steps,
+                           inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale, num_inference_steps,
                                    num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
                            outputs=[editor_state, is_set_text])
 
-        description_prompt.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+        description_prompt.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
                                                     num_inference_steps,
                                                     num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                     rnri_lr],
                                   outputs=[editor_state, is_set_text])
 
-        edit_guidance_scale.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+        edit_guidance_scale.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
                                                      num_inference_steps,
                                                      num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                      rnri_lr],
                                    outputs=[editor_state, is_set_text])
-        num_inference_steps.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+        num_inference_steps.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
                                                      num_inference_steps,
                                                      num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                      rnri_lr],
                                    outputs=[editor_state, is_set_text])
-        inversion_max_step.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+        inversion_max_step.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
                                                     num_inference_steps,
                                                     num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                     rnri_lr],
                                   outputs=[editor_state, is_set_text])
-        rnri_iterations.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+        rnri_iterations.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
                                                  num_inference_steps,
                                                  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                  rnri_lr],
                                outputs=[editor_state, is_set_text])
-        rnri_alpha.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+        rnri_alpha.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
                                             num_inference_steps,
                                             num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                             rnri_lr],
                           outputs=[editor_state, is_set_text])
-        rnri_lr.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+        rnri_lr.change(set_pipe, inputs=[editor_state, pipe_inversion, input_image, description_prompt, edit_guidance_scale,
                                          num_inference_steps,
                                          num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                          rnri_lr],
@@ -295,7 +295,7 @@ with gr.Blocks(css="style.css") as demo:
 
         run_button.click(
             fn=edit,
-            inputs=[editor_state, target_prompt],
+            inputs=[editor_state, pipe_inference, target_prompt],
             outputs=[result]
         )
 
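For reference, below is a minimal, self-contained sketch of the wiring pattern this fix moves to: the pipeline objects are handed to the event handlers through the Gradio inputs lists rather than looked up inside the functions. The sketch assumes pipe_inversion and pipe_inference are held in gr.State; whether the Space defines them that way is not visible in this diff, and every other name below is illustrative, not the Space's actual code.

# Minimal sketch (assumed names; not the Space's actual code).
# Pattern from the fix: objects a handler needs travel through `inputs`,
# so the handler signature and each event's inputs list stay in sync.
import gradio as gr

def set_pipe(inversion_state, pipe_inversion, input_image, description_prompt):
    # Mirrors the guard at line 96 of app.py.
    if input_image is None or not description_prompt:
        return None, "Set an input image and a description prompt first."
    # A real Space would run inversion with pipe_inversion here.
    inversion_state = {"image": input_image, "prompt": description_prompt}
    return inversion_state, "Inputs set."

def edit(inversion_state, pipe_inference, target_prompt):
    if inversion_state is None:
        raise gr.Error("Set inputs before editing.")
    # A real Space would run pipe_inference here; this just echoes.
    return f"edited toward: {target_prompt} (pipe: {pipe_inference})"

with gr.Blocks() as demo:
    editor_state = gr.State()
    pipe_inversion = gr.State("inversion-pipe-placeholder")
    pipe_inference = gr.State("inference-pipe-placeholder")
    input_image = gr.Image()
    description_prompt = gr.Textbox(label="Describe the input image")
    target_prompt = gr.Textbox(label="Target prompt")
    is_set_text = gr.Markdown()
    result = gr.Textbox(label="Result")
    run_button = gr.Button("Edit")

    # The state objects appear in `inputs`, matching the new signatures.
    input_image.change(set_pipe,
                       inputs=[editor_state, pipe_inversion, input_image,
                               description_prompt],
                       outputs=[editor_state, is_set_text])
    run_button.click(fn=edit,
                     inputs=[editor_state, pipe_inference, target_prompt],
                     outputs=[result])

demo.launch()

Gradio calls a handler with one positional argument per component in inputs, so after widening a signature (as this commit does for set_pipe and edit) every event that targets the function must grow its inputs list in step; missing any one of the eight .change registrations would raise a TypeError when that event fires.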