Update app.py
app.py CHANGED
@@ -66,27 +66,54 @@ def resize_image(input_image, resolution):
     img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
     return img
 
+
+def process(input_image,
+            original_image,
+            original_mask,
+            input_mask,
+            selected_points,
+            prompt,
+            negative_prompt,
+            blended,
+            invert_mask,
+            control_strength,
+            seed,
+            randomize_seed,
+            guidance_scale,
+            num_inference_steps):
     if original_image is None:
         raise gr.Error('Please upload the input image')
-    if input_mask is None:
-        raise gr.Error("Please upload a white-black Mask image")
+    if (original_mask is None or len(selected_points)==0) and input_mask is None:
+        raise gr.Error("Please click the region where you hope unchanged/changed, or upload a white-black Mask image")
 
+    # load example image
+    if isinstance(original_image, int):
+        image_name = image_examples[original_image][0]
+        original_image = cv2.imread(image_name)
+        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
+
+    if input_mask is not None:
+        H,W=original_image.shape[:2]
+        original_mask = cv2.resize(input_mask, (W, H))
+    else:
+        original_mask = np.clip(255 - original_mask, 0, 255).astype(np.uint8)
 
     if invert_mask:
+        original_mask=255-original_mask
+
+    mask = 1.*(original_mask.sum(-1)>255)[:,:,np.newaxis]
+    masked_image = original_image * (1-mask)
+
     init_image = Image.fromarray(masked_image.astype(np.uint8)).convert("RGB")
     mask_image = Image.fromarray(original_mask.astype(np.uint8)).convert("RGB")
+
+    generator = torch.Generator("cuda").manual_seed(random.randint(0,2147483647) if randomize_seed else seed)
+
     image = pipe(
+        [prompt]*2,
+        init_image,
+        mask_image,
+        num_inference_steps=num_inference_steps,
         guidance_scale=guidance_scale,
         generator=generator,
         brushnet_conditioning_scale=float(control_strength),
@@ -94,43 +121,220 @@ def process(original_image, input_mask, prompt, negative_prompt, blended, invert
     ).images
 
     if blended:
+        if control_strength<1.0:
             raise gr.Error('Using blurred blending with control strength less than 1.0 is not allowed')
+        blended_image=[]
+        # blur, you can adjust the parameters for better performance
         mask_blurred = cv2.GaussianBlur(mask*255, (21, 21), 0)/255
         mask_blurred = mask_blurred[:,:,np.newaxis]
+        mask = 1-(1-mask) * (1-mask_blurred)
         for image_i in image:
+            image_np=np.array(image_i)
+            image_pasted=original_image * (1-mask) + image_np*mask
+
+            image_pasted=image_pasted.astype(image_np.dtype)
             blended_image.append(Image.fromarray(image_pasted))
+
+        image=blended_image
 
     return image
 
+block = gr.Blocks(
+        theme=gr.themes.Soft(
+            radius_size=gr.themes.sizes.radius_none,
+            text_size=gr.themes.sizes.text_md
+        )
+    ).queue()
+with block:
     with gr.Row():
         with gr.Column():
+
+            gr.HTML(f"""
+                <div style="text-align: center;">
+                    <h1>BrushNet: A Plug-and-Play Image Inpainting Model with Decomposed Dual-Branch Diffusion</h1>
+                    <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
+                        <a href=""></a>
+                        <a href='https://tencentarc.github.io/BrushNet/'><img src='https://img.shields.io/badge/Project_Page-BrushNet-green' alt='Project Page'></a>
+                        <a href='https://arxiv.org/abs/2403.06976'><img src='https://img.shields.io/badge/Paper-Arxiv-blue'></a>
+                    </div>
+                    </br>
+                </div>
+                """)
+
+
+            with gr.Accordion(label="🧭 Instructions:", open=True, elem_id="accordion"):
+                with gr.Row(equal_height=True):
+                    gr.Markdown("""
+                    - ⭐️ <b>step1: </b>Upload or select one image from Example
+                    - ⭐️ <b>step2: </b>Click on Input-image to select the object to be retained (or upload a white-black Mask image, in which white color indicates the region you want to keep unchanged). You can tick the 'Invert Mask' box to switch region unchanged and change.
+                    - ⭐️ <b>step3: </b>Input prompt for generating new contents
+                    - ⭐️ <b>step4: </b>Click Run button
+                    """)
+    with gr.Row():
+        with gr.Column():
+            with gr.Column(elem_id="Input"):
+                with gr.Row():
+                    with gr.Tabs(elem_classes=["feedback"]):
+                        with gr.TabItem("Input Image"):
+                            input_image = gr.Image(type="numpy", label="input",scale=2, height=640)
+                original_image = gr.State(value=None,label="index")
+                original_mask = gr.State(value=None)
+                selected_points = gr.State([],label="select points")
+                with gr.Row(elem_id="Seg"):
+                    radio = gr.Radio(['foreground', 'background'], label='Click to seg: ', value='foreground',scale=2)
+                    undo_button = gr.Button('Undo seg', elem_id="btnSEG",scale=1)
+                prompt = gr.Textbox(label="Prompt", placeholder="Please input your prompt",value='',lines=1)
+                negative_prompt = gr.Text(
+                    label="Negative Prompt",
+                    max_lines=5,
+                    placeholder="Please input your negative prompt",
+                    value='ugly, low quality',lines=1
+                )
+                with gr.Group():
+                    with gr.Row():
+                        blending = gr.Checkbox(label="Blurred Blending", value=False)
+                        invert_mask = gr.Checkbox(label="Invert Mask", value=True)
+                        run_button = gr.Button("Run",elem_id="btn")
+
+            with gr.Accordion("More input params (highly-recommended)", open=False, elem_id="accordion1"):
+                control_strength = gr.Slider(
+                    label="Control Strength: ", show_label=True, minimum=0, maximum=1.1, value=1, step=0.01
+                )
+                with gr.Group():
+                    seed = gr.Slider(
+                        label="Seed: ", minimum=0, maximum=2147483647, step=1, value=551793204
+                    )
+                    randomize_seed = gr.Checkbox(label="Randomize seed", value=False)
+
+                with gr.Group():
+                    with gr.Row():
+                        guidance_scale = gr.Slider(
+                            label="Guidance scale",
+                            minimum=1,
+                            maximum=12,
+                            step=0.1,
+                            value=12,
+                        )
+                        num_inference_steps = gr.Slider(
+                            label="Number of inference steps",
+                            minimum=1,
+                            maximum=50,
+                            step=1,
+                            value=50,
+                        )
+            with gr.Row(elem_id="Image"):
+                with gr.Tabs(elem_classes=["feedback1"]):
+                    with gr.TabItem("User-specified Mask Image (Optional)"):
+                        input_mask = gr.Image(type="numpy", label="Mask Image", height=640)
+
         with gr.Column():
+            with gr.Tabs(elem_classes=["feedback"]):
+                with gr.TabItem("Outputs"):
+                    result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", preview=True)
+            with gr.Row():
+                def process_example(input_image, prompt, input_mask, original_image, selected_points,result_gallery): #
+                    return input_image, prompt, input_mask, original_image, [], result_gallery
+                example = gr.Examples(
+                    label="Input Example",
+                    examples=image_examples,
+                    inputs=[input_image, prompt, input_mask, original_image, selected_points,result_gallery],
+                    outputs=[input_image, prompt, input_mask, original_image, selected_points],
+                    fn=process_example,
+                    run_on_click=True,
+                    examples_per_page=10
+                )
+
+    # once user upload an image, the original image is stored in `original_image`
+    def store_img(img):
+        # image upload is too slow
+        if min(img.shape[0], img.shape[1]) > 512:
+            img = resize_image(img, 512)
+        if max(img.shape[0], img.shape[1])*1.0/min(img.shape[0], img.shape[1])>2.0:
+            raise gr.Error('image aspect ratio cannot be larger than 2.0')
+        return img, img, [], None # when new image is uploaded, `selected_points` should be empty
+
+    input_image.upload(
+        store_img,
+        [input_image],
+        [input_image, original_image, selected_points]
+    )
+
+    # user click the image to get points, and show the points on the image
+    def segmentation(img, sel_pix):
+        # online show seg mask
+        points = []
+        labels = []
+        for p, l in sel_pix:
+            points.append(p)
+            labels.append(l)
+        mobile_predictor.set_image(img if isinstance(img, np.ndarray) else np.array(img))
+        with torch.no_grad():
+            masks, _, _ = mobile_predictor.predict(point_coords=np.array(points), point_labels=np.array(labels), multimask_output=False)
+
+        output_mask = np.ones((masks.shape[1], masks.shape[2], 3))*255
+        for i in range(3):
+            output_mask[masks[0] == True, i] = 0.0
+
+        mask_all = np.ones((masks.shape[1], masks.shape[2], 3))
+        color_mask = np.random.random((1, 3)).tolist()[0]
+        for i in range(3):
+            mask_all[masks[0] == True, i] = color_mask[i]
+        masked_img = img / 255 * 0.3 + mask_all * 0.7
+        masked_img = masked_img*255
+        ## draw points
+        for point, label in sel_pix:
+            cv2.drawMarker(masked_img, point, colors[label], markerType=markers[label], markerSize=20, thickness=5)
+        return masked_img, output_mask
+
+    def get_point(img, sel_pix, point_type, evt: gr.SelectData):
+        if point_type == 'foreground':
+            sel_pix.append((evt.index, 1))    # append the foreground_point
+        elif point_type == 'background':
+            sel_pix.append((evt.index, 0))    # append the background_point
+        else:
+            sel_pix.append((evt.index, 1))    # default foreground_point
+
+        if isinstance(img, int):
+            image_name = image_examples[img][0]
+            img = cv2.imread(image_name)
+            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+        # online show seg mask
+        masked_img, output_mask = segmentation(img, sel_pix)
+        return masked_img.astype(np.uint8), output_mask
+
+    input_image.select(
+        get_point,
+        [original_image, selected_points, radio],
+        [input_image, original_mask],
+    )
+
+    # undo the selected point
+    def undo_points(orig_img, sel_pix):
+        # draw points
+        output_mask = None
+        if len(sel_pix) != 0:
+            if isinstance(orig_img, int): # if orig_img is int, the image if select from examples
+                temp = cv2.imread(image_examples[orig_img][0])
+                temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
+            else:
+                temp = orig_img.copy()
+            sel_pix.pop()
+            # online show seg mask
+            if len(sel_pix) !=0:
+                temp, output_mask = segmentation(temp, sel_pix)
+            return temp.astype(np.uint8), output_mask
+        else:
+            gr.Error("Nothing to Undo")
+
+    undo_button.click(
+        undo_points,
+        [original_image, selected_points],
+        [input_image, original_mask]
+    )
+
-    inputs = [original_image, input_mask, prompt, negative_prompt, blended, invert_mask, control_strength, seed, randomize_seed, guidance_scale, num_inference_steps]
-    run_button.click(fn=process, inputs=inputs, outputs=[result_gallery])
+    ips=[input_image, original_image, original_mask, input_mask, selected_points, prompt, negative_prompt, blending, invert_mask, control_strength, seed, randomize_seed, guidance_scale, num_inference_steps]
+    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
 
-demo.launch(show_api=True)
+block.launch()
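
For readers following the diff, the core of the new process() is the mask construction and the optional blurred-blending composite around the BrushNet pipeline call. The snippet below is a minimal, self-contained sketch of that step under the assumption of same-sized NumPy RGB arrays; the function name demo_blend and its argument names are illustrative only and are not part of app.py.

import cv2
import numpy as np

def demo_blend(original_rgb: np.ndarray, mask_rgb: np.ndarray, generated_rgb: np.ndarray) -> np.ndarray:
    # Standalone sketch (not the app's own function): mirrors the mask
    # binarization and blurred blending that process() applies in app.py.
    # A pixel belongs to the editable region when its RGB sum exceeds 255,
    # i.e. the white area of a white-black mask image.
    mask = 1. * (mask_rgb.sum(-1) > 255)[:, :, np.newaxis]

    # Feather the mask with a Gaussian blur so generated content fades into
    # the original instead of leaving a hard seam at the mask boundary.
    mask_blurred = cv2.GaussianBlur(mask * 255, (21, 21), 0) / 255
    mask_blurred = mask_blurred[:, :, np.newaxis]
    mask = 1 - (1 - mask) * (1 - mask_blurred)

    # Keep original pixels outside the (soft) mask, paste generated pixels inside it.
    pasted = original_rgb * (1 - mask) + generated_rgb * mask
    return pasted.astype(np.uint8)

In app.py this blending path only runs when the "Blurred Blending" checkbox is ticked and Control Strength is not below 1.0; otherwise the pipeline outputs are returned unblended.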