clean code
app.py CHANGED
@@ -56,42 +56,11 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     Image editing using RNRI for inversion demonstrates significant speed-up and improved quality compared to previous state-of-the-art methods.
     Take a look at the [project page](https://barakmam.github.io/rnri.github.io/).
     """)
-
-
-
-    # @spaces.GPU
-    def set_pipe1(image_editor, input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
-                  num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):
-
-        if device == 'cuda':
-            # if image_editor is not None:
-            # image_editor = image_editor.to('cpu')
-
-            torch.cuda.empty_cache()
-
-        if input_image is None or not description_prompt:
-            return None, "Please set all inputs."
-
-        if isinstance(num_inference_steps, str): num_inference_steps = int(num_inference_steps)
-        if isinstance(num_inversion_steps, str): num_inversion_steps = int(num_inversion_steps)
-        if isinstance(edit_guidance_scale, str): edit_guidance_scale = float(edit_guidance_scale)
-        if isinstance(inversion_max_step, str): inversion_max_step = float(inversion_max_step)
-        if isinstance(rnri_iterations, str): rnri_iterations = int(rnri_iterations)
-        if isinstance(rnri_alpha, str): rnri_alpha = float(rnri_alpha)
-        if isinstance(rnri_lr, str): rnri_lr = float(rnri_lr)
-
-        config = RunConfig(num_inference_steps=num_inference_steps,
-                           num_inversion_steps=num_inversion_steps,
-                           edit_guidance_scale=edit_guidance_scale,
-                           inversion_max_step=inversion_max_step)
-        image_editor = ImageEditorDemo(pipe_inversion, pipe_inference, input_image,
-                                       description_prompt, config, device,
-                                       [rnri_iterations, rnri_alpha, rnri_lr])
-        return image_editor, "Input has set!"
+    inv_state = gr.State()


     @spaces.GPU
-    def set_pipe(
+    def set_pipe(input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
                  num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):

         if input_image is None or not description_prompt:
@@ -131,29 +100,10 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Info('Input has set!')
         return inversion_state, "Input has set!"

-
-    # @spaces.GPU
-    def edit1(editor, target_prompt):
-        if editor is None:
-            raise gr.Error("Set inputs before editing.")
-
-        # if device == "cuda":
-        # image = editor.to(device).edit(target_prompt)
-        # else:
-        image = editor.edit(target_prompt)
-        return image
-
-
     @spaces.GPU
     def edit(inversion_state, target_prompt):
         if inversion_state is None:
             raise gr.Error("Set inputs before editing. Progress indication below")
-        # if device == "cuda":
-        # image = editor.to(device).edit(target_prompt)
-        # else:
-
-        # if device == 'cuda':
-        # torch.cuda.empty_cache()

         print(f"#### 5 #### pipe_inversion.device: {pipe_inversion.device}")
         print(f"#### 6 #### pipe_inference.device: {pipe_inference.device}")
@@ -161,11 +111,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         image = ImageEditorDemo.edit(pipe_inference, target_prompt, inversion_state['latent'], inversion_state['noise'],
                                      inversion_state['cfg'], inversion_state['cfg'].edit_guidance_scale)

-        # if device == 'cuda':
-        # pipe_inference.to('cpu')
-        # torch.cuda.empty_cache()
-
-
         print(f"#### 7 #### pipe_inversion.device: {pipe_inversion.device}")
         print(f"#### 8 #### pipe_inference.device: {pipe_inference.device}")
         return image
@@ -262,57 +207,57 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown(f"""Disclaimer: Performance may be inferior to the reported in the paper due to hardware limitation.
    """)
     input_image.change(set_pipe,
-                       inputs=[
+                       inputs=[input_image, description_prompt, edit_guidance_scale, num_inference_steps,
                                num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
-                       outputs=[
+                       outputs=[inv_state, is_set_text])

-    description_prompt.change(set_pipe, inputs=[
+    description_prompt.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                                 num_inference_steps,
                                                 num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                 rnri_lr],
-                              outputs=[
+                              outputs=[inv_state, is_set_text])

-    edit_guidance_scale.change(set_pipe, inputs=[
+    edit_guidance_scale.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                                  num_inference_steps,
                                                  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                  rnri_lr],
-                               outputs=[
-    num_inference_steps.change(set_pipe, inputs=[
+                               outputs=[inv_state, is_set_text])
+    num_inference_steps.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                                  num_inference_steps,
                                                  num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                  rnri_lr],
-                               outputs=[
-    inversion_max_step.change(set_pipe, inputs=[
+                               outputs=[inv_state, is_set_text])
+    inversion_max_step.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                                 num_inference_steps,
                                                 num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                                 rnri_lr],
-                              outputs=[
-    rnri_iterations.change(set_pipe, inputs=[
+                              outputs=[inv_state, is_set_text])
+    rnri_iterations.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                              num_inference_steps,
                                              num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                              rnri_lr],
-                           outputs=[
-    rnri_alpha.change(set_pipe, inputs=[
+                           outputs=[inv_state, is_set_text])
+    rnri_alpha.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                         num_inference_steps,
                                         num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                         rnri_lr],
-                      outputs=[
-    rnri_lr.change(set_pipe, inputs=[
+                      outputs=[inv_state, is_set_text])
+    rnri_lr.change(set_pipe, inputs=[input_image, description_prompt, edit_guidance_scale,
                                      num_inference_steps,
                                      num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
                                      rnri_lr],
-                   outputs=[
+                   outputs=[inv_state, is_set_text])

     # set_button.click(
     # fn=set_pipe,
-    # inputs=[
+    # inputs=[inv_state, input_image, description_prompt, edit_guidance_scale, num_inference_steps,
     # num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
-    # outputs=[
+    # outputs=[inv_state, is_set_text],
     # )

     run_button.click(
         fn=edit,
-        inputs=[
+        inputs=[inv_state, target_prompt],
         outputs=[result]
     )

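Taken together, the commit replaces the cached ImageEditorDemo object with a plain gr.State (inv_state): set_pipe runs the inversion under @spaces.GPU and returns the state plus a status message, every input widget's .change() re-triggers it, and edit reads the state back when run_button is clicked. Below is a minimal sketch of that wiring pattern only, not the Space's actual code; the function and component names (set_inputs, status, result) and the no-op gpu fallback are illustrative assumptions.

import gradio as gr

try:
    import spaces                  # ZeroGPU helper available on Hugging Face Spaces
    gpu = spaces.GPU               # requests GPU hardware for the decorated call
except ImportError:                # no-op fallback so the sketch also runs locally
    def gpu(fn):
        return fn


@gpu
def set_inputs(image, prompt):
    # Stand-in for set_pipe: the real app runs RNRI inversion here and caches
    # latent, noise and config; this sketch just stashes the raw inputs.
    if image is None or not prompt:
        return None, "Please set all inputs."
    return {"image": image, "prompt": prompt}, "Inputs are set."


@gpu
def edit(state, target_prompt):
    # Stand-in for edit: reads whatever set_inputs stored in the gr.State.
    if state is None:
        raise gr.Error("Set inputs before editing.")
    return state["image"]


with gr.Blocks() as demo:
    inv_state = gr.State()         # carries the inversion result between events
    image = gr.Image(type="pil")
    prompt = gr.Textbox(label="Description prompt")
    target = gr.Textbox(label="Target prompt")
    status = gr.Markdown()
    result = gr.Image()
    run = gr.Button("Edit")

    # Any change to an input re-runs the "set" step, mirroring the .change() wiring above.
    image.change(set_inputs, inputs=[image, prompt], outputs=[inv_state, status])
    prompt.change(set_inputs, inputs=[image, prompt], outputs=[inv_state, status])
    run.click(edit, inputs=[inv_state, target], outputs=[result])

demo.launch()

Keeping only data (latent, noise, config) in the State, rather than a live editor object, leaves each @spaces.GPU call self-contained; that, at least, is the pattern the new set_pipe/edit pair follows.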