make pipes global to reduce set_pipe runtime
app.py
CHANGED
@@ -35,6 +35,15 @@ if torch.cuda.is_available():
 else:
     power_device = "CPU"
 
+scheduler_class = MyEulerAncestralDiscreteScheduler
+
+pipe_inversion = SDXLDDIMPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)  # .to('cpu')
+pipe_inference = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo",
+                                                            use_safetensors=True)  # .to('cpu')
+pipe_inference.scheduler = scheduler_class.from_config(pipe_inference.scheduler.config)
+pipe_inversion.scheduler = scheduler_class.from_config(pipe_inversion.scheduler.config)
+pipe_inversion.scheduler_inference = scheduler_class.from_config(pipe_inference.scheduler.config)
+
 # with gr.Blocks(css=css) as demo:
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(f""" # Real Time Editing with RNRI Inversion 🍎⚡️
@@ -50,14 +59,6 @@ with gr.Blocks(css="style.css") as demo:
                  num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):
         if image_editor is not None:
             image_editor = image_editor.to('cpu')
-        scheduler_class = MyEulerAncestralDiscreteScheduler
-
-        pipe_inversion = SDXLDDIMPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)  # .to('cpu')
-        pipe_inference = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo",
-                                                                    use_safetensors=True)  # .to('cpu')
-        pipe_inference.scheduler = scheduler_class.from_config(pipe_inference.scheduler.config)
-        pipe_inversion.scheduler = scheduler_class.from_config(pipe_inversion.scheduler.config)
-        pipe_inversion.scheduler_inference = scheduler_class.from_config(pipe_inference.scheduler.config)
 
         config = RunConfig(num_inference_steps=num_inference_steps,
                            num_inversion_steps=num_inversion_steps,
@@ -79,8 +80,6 @@ with gr.Blocks(css="style.css") as demo:
         image = editor.edit(target_prompt)
         return image
 
-
-    gr.Markdown(f"""running on {power_device}""")
     with gr.Row():
         with gr.Column(elem_id="col-container-1"):
             with gr.Row():