import gradio as gr
import torch
from src.euler_scheduler import MyEulerAncestralDiscreteScheduler
from diffusers.pipelines.auto_pipeline import AutoPipelineForImage2Image
from src.sdxl_inversion_pipeline import SDXLDDIMPipeline
from src.config import RunConfig
from src.editor import ImageEditorDemo
device = "cuda" if torch.cuda.is_available() else "cpu"
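
# Two SDXL-Turbo pipelines share the custom Euler Ancestral scheduler:
# one performs the inversion (SDXLDDIMPipeline), the other the image-to-image inference.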
scheduler_class = MyEulerAncestralDiscreteScheduler
pipe_inversion = SDXLDDIMPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True).to(device)
pipe_inference = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True).to(device)
pipe_inference.scheduler = scheduler_class.from_config(pipe_inference.scheduler.config)
pipe_inversion.scheduler = scheduler_class.from_config(pipe_inversion.scheduler.config)
pipe_inversion.scheduler_inference = scheduler_class.from_config(pipe_inference.scheduler.config)
# if torch.cuda.is_available():
# torch.cuda.max_memory_allocated(device=device)
# pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
# pipe.enable_xformers_memory_efficient_attention()
# pipe = pipe.to(device)
# else:
# pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
# pipe = pipe.to(device)
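
# set_pipe builds a RunConfig from the UI values and wraps both pipelines in an
# ImageEditorDemo, which holds the inverted input image for subsequent edits.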
def set_pipe(input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
             num_inversion_steps=4, inversion_max_step=0.6):
    config = RunConfig(num_inference_steps=num_inference_steps,
                       num_inversion_steps=num_inversion_steps,
                       edit_guidance_scale=edit_guidance_scale,
                       inversion_max_step=inversion_max_step)
    return ImageEditorDemo(pipe_inversion, pipe_inference, input_image, description_prompt, config, device)
# return editor
# image = editor.edit(target_prompt)
# return image
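
# edit applies the target prompt to the image previously inverted by set_pipe.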
def edit(editor, target_prompt):
    if editor is None:
        # The output component is an image, so report the missing setup step as an error popup.
        raise gr.Error("Please set the input image & description before editing.")
    return editor.edit(target_prompt)
examples = [
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
"An astronaut riding a green horse",
"A delicious ceviche cheesecake slice",
]
# css = """
# #col-container-1 {
# margin: 0 auto;
# max-width: 520px;
# }
# #col-container-2 {
# margin: 0 auto;
# max-width: 520px;
# }
# """
if torch.cuda.is_available():
    power_device = "GPU"
else:
    power_device = "CPU"
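
# Gradio UI: the left column takes the input image, its description and the edit prompt;
# the right column shows the edited result.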
# with gr.Blocks(css=css) as demo:
with gr.Blocks() as demo:
    gr.Markdown(f"""
    This is a demo for our [paper](https://arxiv.org/abs/2312.12540) **RNRI: Regularized Newton Raphson Inversion for Text-to-Image Diffusion Models**.
    Image editing with RNRI inversion achieves a significant speed-up and improved quality compared to previous state-of-the-art methods.
    Take a look at our [project page](https://barakmam.github.io/rnri.github.io/).
    """)
    with gr.Row():
        with gr.Column(elem_id="col-container-1"):
            with gr.Row():
                input_image = gr.Image(label="Input image", sources=['upload', 'webcam'], type="pil")
            with gr.Row():
                description_prompt = gr.Text(
                    label="Image description",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your image description",
                    container=False,
                )
            with gr.Row():
                set_button = gr.Button("Set input image & description", scale=1)
            editor_state = gr.State()
            with gr.Row():
                target_prompt = gr.Text(
                    label="Edit prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your edit prompt",
                    container=False,
                )
            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
                    edit_guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.0,
                        maximum=10.0,
                        step=0.1,
                        value=1.2,
                    )
                    num_inference_steps = gr.Slider(
                        label="Number of RNRI iterations",
                        minimum=1,
                        maximum=12,
                        step=1,
                        value=4,
                    )
                    inversion_max_step = gr.Slider(
                        label="Inversion strength",
                        minimum=0.0,
                        maximum=1.0,
                        step=0.01,
                        value=0.6,
                    )
            with gr.Row():
                run_button = gr.Button("Edit", scale=1)
        with gr.Column(elem_id="col-container-2"):
            result = gr.Image(label="Result")
            # gr.Examples(
            #     examples = examples,
            #     inputs = [prompt]
            # )
    # Wire the buttons. The "Number of RNRI iterations" slider is passed for both the
    # num_inference_steps and num_inversion_steps arguments of set_pipe, so inversion
    # and inference use the same step count.
    set_button.click(
        fn=set_pipe,
        inputs=[input_image, description_prompt, edit_guidance_scale, num_inference_steps,
                num_inference_steps, inversion_max_step],
        outputs=editor_state
    )
    run_button.click(
        fn=edit,
        inputs=[editor_state, target_prompt],
        outputs=[result]
    )
demo.queue().launch()
# im = infer(input_image, description_prompt, target_prompt, edit_guidance_scale, num_inference_steps=4, num_inversion_steps=4,
# inversion_max_step=0.6)