trigger set pipe on inputs change
app.py CHANGED

@@ -7,6 +7,7 @@ from src.sdxl_inversion_pipeline import SDXLDDIMPipeline
 from src.config import RunConfig
 from src.editor import ImageEditorDemo
 import spaces
+
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # if torch.cuda.is_available():
@@ -43,6 +44,7 @@ with gr.Blocks(css="style.css") as demo:
     """)
     editor_state = gr.State()
 
+
     @spaces.GPU
     def set_pipe(image_editor, input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
                  num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):
@@ -50,16 +52,12 @@ with gr.Blocks(css="style.css") as demo:
         image_editor = image_editor.to('cpu')
         scheduler_class = MyEulerAncestralDiscreteScheduler
 
-        print('\n################## 1')
         pipe_inversion = SDXLDDIMPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)  # .to('cpu')
-        print('\n################## 2')
         pipe_inference = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo",
                                                                     use_safetensors=True)  # .to('cpu')
-        print('\n################## 3')
         pipe_inference.scheduler = scheduler_class.from_config(pipe_inference.scheduler.config)
         pipe_inversion.scheduler = scheduler_class.from_config(pipe_inversion.scheduler.config)
         pipe_inversion.scheduler_inference = scheduler_class.from_config(pipe_inference.scheduler.config)
-        print('\n################## 4')
 
         config = RunConfig(num_inference_steps=num_inference_steps,
                            num_inversion_steps=num_inversion_steps,
@@ -68,8 +66,9 @@ with gr.Blocks(css="style.css") as demo:
         image_editor = ImageEditorDemo(pipe_inversion, pipe_inference, input_image,
                                        description_prompt, config, device,
                                        [rnri_iterations, rnri_alpha, rnri_lr])
-        print('\n################## 5')
         return image_editor, "Input has set!"
+
+
     @spaces.GPU
     def edit(editor, target_prompt):
         if editor is None:
@@ -86,7 +85,6 @@ with gr.Blocks(css="style.css") as demo:
     with gr.Column(elem_id="col-container-1"):
         with gr.Row():
             input_image = gr.Image(label="Input image", sources=['upload', 'webcam'], type="pil")
-
         with gr.Row():
             description_prompt = gr.Text(
                 label="Image description",
@@ -172,12 +170,54 @@ with gr.Blocks(css="style.css") as demo:
                 inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
     )
 
-
-
-
-
-
-
+    input_image.change(set_pipe,
+                       inputs=[editor_state, input_image, description_prompt, edit_guidance_scale, num_inference_steps,
+                               num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
+                       outputs=[editor_state, is_set_text])
+
+    description_prompt.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+                                                num_inference_steps,
+                                                num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
+                                                rnri_lr],
+                              outputs=[editor_state, is_set_text])
+
+    edit_guidance_scale.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+                                                 num_inference_steps,
+                                                 num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
+                                                 rnri_lr],
+                               outputs=[editor_state, is_set_text])
+    num_inference_steps.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+                                                 num_inference_steps,
+                                                 num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
+                                                 rnri_lr],
+                               outputs=[editor_state, is_set_text])
+    inversion_max_step.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+                                                num_inference_steps,
+                                                num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
+                                                rnri_lr],
+                              outputs=[editor_state, is_set_text])
+    rnri_iterations.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+                                             num_inference_steps,
+                                             num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
+                                             rnri_lr],
+                           outputs=[editor_state, is_set_text])
+    rnri_alpha.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+                                        num_inference_steps,
+                                        num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
+                                        rnri_lr],
+                      outputs=[editor_state, is_set_text])
+    rnri_lr.change(set_pipe, inputs=[editor_state, input_image, description_prompt, edit_guidance_scale,
+                                     num_inference_steps,
+                                     num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha,
+                                     rnri_lr],
+                   outputs=[editor_state, is_set_text])
+
+    # set_button.click(
+    #     fn=set_pipe,
+    #     inputs=[editor_state, input_image, description_prompt, edit_guidance_scale, num_inference_steps,
+    #             num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
+    #     outputs=[editor_state, is_set_text],
+    # )
 
     run_button.click(
         fn=edit,
@@ -186,6 +226,3 @@ with gr.Blocks(css="style.css") as demo:
     )
 
 demo.queue().launch()
-
-# im = infer(input_image, description_prompt, target_prompt, edit_guidance_scale, num_inference_steps=4, num_inversion_steps=4,
-#            inversion_max_step=0.6)
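The commit registers the same `set_pipe` callback on each of the eight controls by hand, with an identical inputs/outputs list every time. A minimal loop-based sketch of that wiring is below; it is not part of the committed app.py, and it assumes the components already defined in the `gr.Blocks` context above. Note it deliberately preserves a quirk of the committed code: `num_inference_steps` appears twice in the inputs list, so the second occurrence is what feeds `set_pipe`'s `num_inversion_steps` parameter.

```python
# Sketch only (not in this commit): register set_pipe on every control in a
# loop instead of eight hand-written .change() calls. Assumes editor_state,
# is_set_text, set_pipe, and the controls below exist in the Blocks context.
shared_inputs = [
    editor_state, input_image, description_prompt, edit_guidance_scale,
    num_inference_steps,
    num_inference_steps,  # passed twice in the commit; fills the num_inversion_steps slot
    inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr,
]

for control in (input_image, description_prompt, edit_guidance_scale, num_inference_steps,
                inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr):
    # Any change to a control re-runs set_pipe and refreshes the editor state
    # and the status text, exactly as in the unrolled version.
    control.change(set_pipe, inputs=shared_inputs, outputs=[editor_state, is_set_text])
```

Behavior matches the unrolled registrations: editing any control rebuilds the inversion and inference pipelines via `set_pipe`, which is why each handler writes back to `editor_state` and updates `is_set_text`.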