Spaces: Build error
Update app.py
app.py CHANGED
@@ -66,59 +66,27 @@ def resize_image(input_image, resolution):
     img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
     return img

-
-def process(input_image,
-    original_image,
-    original_mask,
-    input_mask,
-    selected_points,
-    prompt,
-    negative_prompt,
-    blended,
-    invert_mask,
-    control_strength,
-    seed,
-    randomize_seed,
-    guidance_scale,
-    num_inference_steps):
-
+def process(original_image, input_mask, prompt, negative_prompt, blended, invert_mask, control_strength, seed, randomize_seed, guidance_scale, num_inference_steps):
     if original_image is None:
-
-
-
-        else:
-            raise gr.Error('Please upload the input image')
-    if (original_mask is None or len(selected_points)==0) and input_mask is None:
-        raise gr.Error("Please click the region where you hope unchanged/changed, or upload a white-black Mask image")
+        raise gr.Error('Please upload the input image')
+    if input_mask is None:
+        raise gr.Error("Please upload a white-black Mask image")

-
-
-        image_name = image_examples[original_image][0]
-        original_image = cv2.imread(image_name)
-        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
-
-    if input_mask is not None:
-        H,W=original_image.shape[:2]
-        original_mask = cv2.resize(input_mask, (W, H))
-    else:
-        original_mask = np.clip(255 - original_mask, 0, 255).astype(np.uint8)
+    H, W = original_image.shape[:2]
+    original_mask = cv2.resize(input_mask, (W, H))

     if invert_mask:
-        original_mask=255-original_mask
-
-
-    masked_image = original_image * (1-mask)
-
+        original_mask = 255 - original_mask
+    mask = 1.*(original_mask.sum(-1) > 255)[:,:,np.newaxis]
+    masked_image = original_image * (1 - mask)
     init_image = Image.fromarray(masked_image.astype(np.uint8)).convert("RGB")
     mask_image = Image.fromarray(original_mask.astype(np.uint8)).convert("RGB")
-
-    generator = torch.Generator("cuda").manual_seed(random.randint(0,2147483647) if randomize_seed else seed)
-
+    generator = torch.Generator("cuda").manual_seed(random.randint(0, 2147483647) if randomize_seed else seed)
     image = pipe(
-        [prompt]*2,
-        init_image,
-        mask_image,
-        num_inference_steps=num_inference_steps,
+        [prompt]*2,
+        init_image,
+        mask_image,
+        num_inference_steps=num_inference_steps,
         guidance_scale=guidance_scale,
         generator=generator,
         brushnet_conditioning_scale=float(control_strength),
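Note on this hunk: the rewrite replaces the click-driven mask path with a direct one. The uploaded white-black mask is resized to the image, optionally inverted, then binarized with `original_mask.sum(-1) > 255` (a pixel counts as masked once its RGB channel sum exceeds 255), and `[:,:,np.newaxis]` shapes the result into a float (H, W, 1) array that broadcasts over the image when computing masked_image. A minimal sketch of the same arithmetic on synthetic arrays (shapes and values are illustrative, not taken from the Space):

    import numpy as np

    # Stand-ins for the Space's inputs: an RGB image and a white-on-black RGB mask.
    original_image = np.full((4, 4, 3), 200, dtype=np.uint8)
    original_mask = np.zeros((4, 4, 3), dtype=np.uint8)
    original_mask[1:3, 1:3] = 255  # white square marks the region to repaint

    # Same binarization as the new process(): channel sum > 255 means "masked",
    # yielding a float (H, W, 1) array of 0.0/1.0 that broadcasts over RGB.
    mask = 1. * (original_mask.sum(-1) > 255)[:, :, np.newaxis]

    # Zero out the repaint region; this array becomes init_image for the pipeline.
    masked_image = original_image * (1 - mask)

    print(mask[:, :, 0])       # 1.0 inside the square, 0.0 elsewhere
    print(masked_image[1, 1])  # [0. 0. 0.]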
@@ -126,220 +94,43 @@ def process(input_image,
     ).images

     if blended:
-        if control_strength<1.0:
+        if control_strength < 1.0:
             raise gr.Error('Using blurred blending with control strength less than 1.0 is not allowed')
-        blended_image=[]
-        # blur, you can adjust the parameters for better performance
+        blended_image = []
         mask_blurred = cv2.GaussianBlur(mask*255, (21, 21), 0)/255
         mask_blurred = mask_blurred[:,:,np.newaxis]
-        mask = 1-(1-mask) * (1-mask_blurred)
+        mask = 1 - (1 - mask) * (1 - mask_blurred)
         for image_i in image:
-            image_np=np.array(image_i)
-            image_pasted=original_image * (1-mask) + image_np*mask
-
-            image_pasted=image_pasted.astype(image_np.dtype)
+            image_np = np.array(image_i)
+            image_pasted = original_image * (1 - mask) + image_np * mask
+            image_pasted = image_pasted.astype(image_np.dtype)
             blended_image.append(Image.fromarray(image_pasted))
-
-        image=blended_image
+        image = blended_image

     return image

-
-
-    radius_size=gr.themes.sizes.radius_none,
-    text_size=gr.themes.sizes.text_md
-    )
-).queue()
-with block:
+# Create Gradio interface
+with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
-
-            gr.
-
-
-
-
-
-
-
-
-
-            ""
-
-
-    with gr.Accordion(label="🧭 Instructions:", open=True, elem_id="accordion"):
-        with gr.Row(equal_height=True):
-            gr.Markdown("""
-                        - ⭐️ <b>step1: </b>Upload or select one image from Example
-                        - ⭐️ <b>step2: </b>Click on Input-image to select the object to be retained (or upload a white-black Mask image, in which white color indicates the region you want to keep unchanged). You can tick the 'Invert Mask' box to switch region unchanged and change.
-                        - ⭐️ <b>step3: </b>Input prompt for generating new contents
-                        - ⭐️ <b>step4: </b>Click Run button
-                        """)
-    with gr.Row():
-        with gr.Column():
-            with gr.Column(elem_id="Input"):
-                with gr.Row():
-                    with gr.Tabs(elem_classes=["feedback"]):
-                        with gr.TabItem("Input Image"):
-                            input_image = gr.Image(type="numpy", label="input",scale=2, height=1024)
-                original_image = gr.State(value=None, label="index")
-                original_mask = gr.State(value=None)
-                selected_points = gr.State([],label="select points")
-                with gr.Row(elem_id="Seg"):
-                    radio = gr.Radio(['foreground', 'background'], label='Click to seg: ', value='foreground',scale=2)
-                    undo_button = gr.Button('Undo seg', elem_id="btnSEG",scale=1)
-            prompt = gr.Textbox(label="Prompt", placeholder="Please input your prompt",value='',lines=1)
-            negative_prompt = gr.Text(
-                label="Negative Prompt",
-                max_lines=5,
-                placeholder="Please input your negative prompt",
-                value='ugly, low quality',lines=1
-            )
-            with gr.Group():
-                with gr.Row():
-                    blending = gr.Checkbox(label="Blurred Blending", value=False)
-                    invert_mask = gr.Checkbox(label="Invert Mask", value=True)
-            run_button = gr.Button("Run",elem_id="btn")
-
-            with gr.Accordion("More input params (highly-recommended)", open=False, elem_id="accordion1"):
-                control_strength = gr.Slider(
-                    label="Control Strength: ", show_label=True, minimum=0, maximum=1.1, value=1, step=0.01
-                )
-                with gr.Group():
-                    seed = gr.Slider(
-                        label="Seed: ", minimum=0, maximum=2147483647, step=1, value=551793204
-                    )
-                    randomize_seed = gr.Checkbox(label="Randomize seed", value=False)
-
-                with gr.Group():
-                    with gr.Row():
-                        guidance_scale = gr.Slider(
-                            label="Guidance scale",
-                            minimum=1,
-                            maximum=12,
-                            step=0.1,
-                            value=12,
-                        )
-                        num_inference_steps = gr.Slider(
-                            label="Number of inference steps",
-                            minimum=1,
-                            maximum=50,
-                            step=1,
-                            value=50,
-                        )
-            with gr.Row(elem_id="Image"):
-                with gr.Tabs(elem_classes=["feedback1"]):
-                    with gr.TabItem("User-specified Mask Image (Optional)"):
-                        input_mask = gr.Image(type="numpy", label="Mask Image", height=1024)
-
+            original_image = gr.Image(type="numpy", label="Original Image")
+            input_mask = gr.Image(type="numpy", label="Mask Image")
+            prompt = gr.Textbox(label="Prompt")
+            negative_prompt = gr.Textbox(label="Negative Prompt", value='ugly, low quality')
+            blended = gr.Checkbox(label="Blurred Blending", value=False)
+            invert_mask = gr.Checkbox(label="Invert Mask", value=False)
+            control_strength = gr.Slider(label="Control Strength", minimum=0, maximum=1.1, value=1, step=0.01)
+            seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=551793204)
+            randomize_seed = gr.Checkbox(label="Randomize Seed", value=False)
+            guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=12, step=0.1, value=7.5)
+            num_inference_steps = gr.Slider(label="Number of Inference Steps", minimum=1, maximum=50, step=1, value=50)
+            run_button = gr.Button("Run")
+
         with gr.Column():
-
-            with gr.TabItem("Outputs"):
-                result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", preview=True)
-        with gr.Row():
-            def process_example(input_image, prompt, input_mask, original_image, selected_points,result_gallery): #
-                return input_image, prompt, input_mask, original_image, [], result_gallery
-            example = gr.Examples(
-                label="Input Example",
-                examples=image_examples,
-                inputs=[input_image, prompt, input_mask, original_image, selected_points, result_gallery],
-                outputs=[input_image, prompt, input_mask, original_image, selected_points],
-                fn=process_example,
-                run_on_click=True,
-                examples_per_page=10
-            )
-
-    # once user upload an image, the original image is stored in `original_image`
-    def store_img(img):
-        # image upload is too slow
-        if min(img.shape[0], img.shape[1]) > 1024:
-            img = resize_image(img, 1024)
-        if max(img.shape[0], img.shape[1])*1.0/min(img.shape[0], img.shape[1])>2.0:
-            raise gr.Error('image aspect ratio cannot be larger than 2.0')
-        return img, img, [], None # when new image is uploaded, `selected_points` should be empty
-
-    input_image.upload(
-        store_img,
-        [input_image],
-        [input_image, original_image, selected_points]
-    )
-
-    # user click the image to get points, and show the points on the image
-    def segmentation(img, sel_pix):
-        # online show seg mask
-        points = []
-        labels = []
-        for p, l in sel_pix:
-            points.append(p)
-            labels.append(l)
-        mobile_predictor.set_image(img if isinstance(img, np.ndarray) else np.array(img))
-        with torch.no_grad():
-            masks, _, _ = mobile_predictor.predict(point_coords=np.array(points), point_labels=np.array(labels), multimask_output=False)
-
-        output_mask = np.ones((masks.shape[1], masks.shape[2], 3))*255
-        for i in range(3):
-            output_mask[masks[0] == True, i] = 0.0
-
-        mask_all = np.ones((masks.shape[1], masks.shape[2], 3))
-        color_mask = np.random.random((1, 3)).tolist()[0]
-        for i in range(3):
-            mask_all[masks[0] == True, i] = color_mask[i]
-        masked_img = img / 255 * 0.3 + mask_all * 0.7
-        masked_img = masked_img*255
-        ## draw points
-        for point, label in sel_pix:
-            cv2.drawMarker(masked_img, point, colors[label], markerType=markers[label], markerSize=20, thickness=5)
-        return masked_img, output_mask
-
-    def get_point(img, sel_pix, point_type, evt: gr.SelectData):
-        if point_type == 'foreground':
-            sel_pix.append((evt.index, 1)) # append the foreground_point
-        elif point_type == 'background':
-            sel_pix.append((evt.index, 0)) # append the background_point
-        else:
-            sel_pix.append((evt.index, 1)) # default foreground_point
-
-        if isinstance(img, int):
-            image_name = image_examples[img][0]
-            img = cv2.imread(image_name)
-            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
-        # online show seg mask
-        masked_img, output_mask = segmentation(img, sel_pix)
-        return masked_img.astype(np.uint8), output_mask
-
-    input_image.select(
-        get_point,
-        [original_image, selected_points, radio],
-        [input_image, original_mask],
-    )
-
-    # undo the selected point
-    def undo_points(orig_img, sel_pix):
-        # draw points
-        output_mask = None
-        if len(sel_pix) != 0:
-            if isinstance(orig_img, int): # if orig_img is int, the image if select from examples
-                temp = cv2.imread(image_examples[orig_img][0])
-                temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
-            else:
-                temp = orig_img.copy()
-            sel_pix.pop()
-            # online show seg mask
-            if len(sel_pix) !=0:
-                temp, output_mask = segmentation(temp, sel_pix)
-            return temp.astype(np.uint8), output_mask
-        else:
-            gr.Error("Nothing to Undo")
-
-    undo_button.click(
-        undo_points,
-        [original_image, selected_points],
-        [input_image, original_mask]
-    )
-
-    ips=[input_image, original_image, original_mask, input_mask, selected_points, prompt, negative_prompt, blending, invert_mask, control_strength, seed, randomize_seed, guidance_scale, num_inference_steps]
-    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
+            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", preview=True)

+    inputs = [original_image, input_mask, prompt, negative_prompt, blended, invert_mask, control_strength, seed, randomize_seed, guidance_scale, num_inference_steps]
+    run_button.click(fn=process, inputs=inputs, outputs=[result_gallery])

-
+demo.queue(concurrency_count=1, max_size=1, api_open=True)
+demo.launch(show_api=True)
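A few notes on the change follow. The blending branch kept by this hunk feathers the hard 0/1 mask with a Gaussian blur and pastes the generated pixels back over the original, so the border between kept and repainted regions fades instead of cutting hard; the control-strength guard presumably exists because the paste-back assumes the model reproduced the unmasked pixels, which only a full conditioning scale encourages. A self-contained sketch of the paste-back with the same 21x21 kernel (synthetic arrays, not the Space's data):

    import numpy as np
    import cv2

    h, w = 64, 64
    original = np.full((h, w, 3), 180.0)  # stand-in for the source image
    generated = np.zeros((h, w, 3))       # stand-in for one pipeline output
    mask = np.zeros((h, w, 1))
    mask[16:48, 16:48] = 1.0              # hard binary repaint region

    # Feather the mask edge as in the diff: blur the 0..255 mask, rescale to 0..1.
    mask_blurred = cv2.GaussianBlur(mask * 255, (21, 21), 0) / 255
    mask_blurred = mask_blurred[:, :, np.newaxis]
    mask = 1 - (1 - mask) * (1 - mask_blurred)  # union of hard and blurred masks

    # Keep the original outside the mask, the generated image inside, soft in between.
    pasted = (original * (1 - mask) + generated * mask).astype(np.uint8)
    print(pasted.shape)  # (64, 64, 3)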
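Seeding in the new process() is one line: a torch.Generator seeded from the Seed slider, or from random.randint when "Randomize Seed" is ticked. The diff hard-codes the "cuda" device; the helper below factors out the same rule with a CPU fallback (the fallback is my addition, not in the Space):

    import random
    import torch

    def make_generator(seed: int, randomize_seed: bool) -> torch.Generator:
        # Same seeding rule as the diff, plus a device fallback so it runs anywhere.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        chosen = random.randint(0, 2147483647) if randomize_seed else seed
        return torch.Generator(device).manual_seed(chosen)

    g = make_generator(seed=551793204, randomize_seed=False)
    print(torch.randn(2, generator=g, device=g.device))  # identical across runs for a fixed seed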
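The pipe(...) call keeps its shape: two copies of the prompt (so the gallery gets two candidates), the masked init image, the mask image, and BrushNet's extra brushnet_conditioning_scale knob. One unchanged line (old 125 / new 93) falls between the two hunks and is not shown; presumably that is where the remaining arguments such as the negative prompt are passed. pipe itself is built earlier in app.py and is outside this diff, so the sketch below shows the equivalent call against a stock diffusers inpainting pipeline (model id and inputs are placeholders; brushnet_conditioning_scale exists only on the BrushNet pipeline and is omitted here):

    import torch
    from PIL import Image
    from diffusers import StableDiffusionInpaintPipeline

    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
    ).to("cuda")

    init_image = Image.new("RGB", (512, 512), "gray")   # masked source image
    mask_image = Image.new("RGB", (512, 512), "black")  # white = repaint
    mask_image.paste((255, 255, 255), (128, 128, 384, 384))

    images = pipe(
        ["a red sofa"] * 2,  # two prompts -> two candidate images
        init_image,
        mask_image,
        num_inference_steps=50,
        guidance_scale=7.5,
        generator=torch.Generator("cuda").manual_seed(0),
    ).images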
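The rebuilt UI drops all of the click-to-segment tooling and reduces to plain Blocks wiring: declare components inside Row/Column contexts, collect them in a list, and bind run_button.click to process. A minimal runnable skeleton of that pattern (toy callback in place of the Space's process):

    import gradio as gr

    def toy_process(image, prompt):
        # Stand-in for the Space's process(); just echoes the upload back.
        if image is None:
            raise gr.Error('Please upload the input image')
        return [image]

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                image = gr.Image(type="numpy", label="Original Image")
                prompt = gr.Textbox(label="Prompt")
                run_button = gr.Button("Run")
            with gr.Column():
                gallery = gr.Gallery(label="Output")
        run_button.click(fn=toy_process, inputs=[image, prompt], outputs=[gallery])

    if __name__ == "__main__":
        demo.queue().launch()

One caution on the added tail of the diff: demo.queue(concurrency_count=1, max_size=1, api_open=True) is Gradio 3.x API; Gradio 4 removed concurrency_count from queue() in favor of per-event concurrency_limit, which is worth checking against this Space's "Build error" status.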
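For reference, the interaction code this commit removes was point-prompted segmentation: clicks accumulated as (coordinates, foreground/background label) pairs in selected_points, a MobileSAM-style predictor turned them into a mask, and that mask both tinted the preview and became original_mask. A condensed sketch of that flow against the segment_anything predictor API (the checkpoint path is a placeholder; the Space built its mobile_predictor elsewhere in app.py):

    import numpy as np
    import torch
    from segment_anything import SamPredictor, sam_model_registry

    sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b.pth")  # placeholder path
    predictor = SamPredictor(sam)

    def click_to_mask(img, clicks):
        # clicks: [((x, y), label), ...] with label 1 = foreground, 0 = background,
        # mirroring the removed get_point()/segmentation() pair.
        points = np.array([p for p, _ in clicks])
        labels = np.array([l for _, l in clicks])
        predictor.set_image(img)
        with torch.no_grad():
            masks, _, _ = predictor.predict(
                point_coords=points, point_labels=labels, multimask_output=False
            )
        # Same convention as the removed code: white (255) = keep, black (0) = selected.
        output_mask = np.full((masks.shape[1], masks.shape[2], 3), 255, dtype=np.uint8)
        output_mask[masks[0]] = 0
        return output_mask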
|