Barak1 committed on
Commit
067c128
·
2 Parent(s): 505b4e4 3104476

Merge branch 'main' of https://huggingface.co/spaces/barakmeiri/RNRI

Browse files
Files changed (2) hide show
  1. app.py +8 -6
  2. style.css +16 -0
app.py CHANGED
@@ -6,7 +6,7 @@ from diffusers.pipelines.auto_pipeline import AutoPipelineForImage2Image
6
  from src.sdxl_inversion_pipeline import SDXLDDIMPipeline
7
  from src.config import RunConfig
8
  from src.editor import ImageEditorDemo
9
-
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
 
12
  scheduler_class = MyEulerAncestralDiscreteScheduler
@@ -27,7 +27,7 @@ pipe_inversion.scheduler_inference = scheduler_class.from_config(pipe_inference.
27
  # pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
28
  # pipe = pipe.to(device)
29
 
30
-
31
  def set_pipe(input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
32
  num_inversion_steps=4, inversion_max_step=0.6):
33
  config = RunConfig(num_inference_steps=num_inference_steps,
@@ -69,8 +69,8 @@ else:
69
  power_device = "CPU"
70
 
71
  # with gr.Blocks(css=css) as demo:
72
- with gr.Blocks() as demo:
73
- gr.Markdown(f"""
74
  This is a demo for our [paper](https://arxiv.org/abs/2312.12540) **RNRI: Regularized Newton Raphson Inversion for Text-to-Image Diffusion Models**.
75
  Image editing using our RNRI for inversion demonstrates significant speed-up and improved quality compared to previous state-of-the-art methods.
76
  Take a look at our [project page](https://barakmam.github.io/rnri.github.io/).
@@ -83,9 +83,10 @@ with gr.Blocks() as demo:
83
  with gr.Row():
84
  description_prompt = gr.Text(
85
  label="Image description",
 
86
  show_label=False,
87
  max_lines=1,
88
- placeholder="Enter your image description",
89
  container=False,
90
  )
91
 
@@ -96,9 +97,10 @@ with gr.Blocks() as demo:
96
  with gr.Row():
97
  target_prompt = gr.Text(
98
  label="Edit prompt",
 
99
  show_label=False,
100
  max_lines=1,
101
- placeholder="Enter your edit prompt",
102
  container=False,
103
  )
104
 
 
6
  from src.sdxl_inversion_pipeline import SDXLDDIMPipeline
7
  from src.config import RunConfig
8
  from src.editor import ImageEditorDemo
9
+ import spaces
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
 
12
  scheduler_class = MyEulerAncestralDiscreteScheduler
 
27
  # pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
28
  # pipe = pipe.to(device)
29
 
30
+ @spaces.GPU
31
  def set_pipe(input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
32
  num_inversion_steps=4, inversion_max_step=0.6):
33
  config = RunConfig(num_inference_steps=num_inference_steps,
 
69
  power_device = "CPU"
70
 
71
  # with gr.Blocks(css=css) as demo:
72
+ with gr.Blocks(css="style.css") as demo:
73
+ gr.Markdown(f""" # Real Time Editing with RNRI Inversion 🍎⚡️
74
  This is a demo for our [paper](https://arxiv.org/abs/2312.12540) **RNRI: Regularized Newton Raphson Inversion for Text-to-Image Diffusion Models**.
75
  Image editing using our RNRI for inversion demonstrates significant speed-up and improved quality compared to previous state-of-the-art methods.
76
  Take a look at our [project page](https://barakmam.github.io/rnri.github.io/).
 
83
  with gr.Row():
84
  description_prompt = gr.Text(
85
  label="Image description",
86
+ info = "Enter your image description ",
87
  show_label=False,
88
  max_lines=1,
89
+ placeholder="a cake on a table",
90
  container=False,
91
  )
92
 
 
97
  with gr.Row():
98
  target_prompt = gr.Text(
99
  label="Edit prompt",
100
+ info = "Enter your edit prompt",
101
  show_label=False,
102
  max_lines=1,
103
+ placeholder="an oreo cake on a table",
104
  container=False,
105
  )
106
 
style.css ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ #component-0{
3
+ max-width: 900px;
4
+ margin: 0 auto;
5
+ }
6
+
7
+ #description, h1 {
8
+ text-align: center;
9
+ }
10
+
11
+ #duplicate-button {
12
+ margin: auto;
13
+ color: #fff;
14
+ background: #1565c0;
15
+ border-radius: 100vh;
16
+ }