Commit 2677815
Parent(s): 3caebd7
adding app with CLIP image segmentation
app.py CHANGED

```diff
@@ -52,35 +52,29 @@ def detect_using_clip(image,prompts=[],threshould=0.4):
         prompt = prompt.lower()
 
         model_detections[prompt] = [rescale_bbox(prop.bbox,orig_image_shape=image.shape[:2],model_shape=predicted_image.shape[0]) for prop in props]
-
-        print(a.shape,image.shape[:2])
-        predicted_images[prompt]= cv2.resize(np.expand_dims(predicted_image,axis=-1),(h,w),interpolation = cv2.INTER_LINEAR)
-
+        predicted_images[prompt]= predicted_image
     return model_detections , predicted_images
 
-def visualize_images(image,detections,
+def visualize_images(image,detections,predicted_images,prompt):
     alpha = 0.7
-    H,W = image.shape[:2]
+    # H,W = image.shape[:2]
     prompt = prompt.lower()
-
-    mask_image = create_mask(image=
+    image_resize = cv2.resize(image,(352,352))
+    mask_image = create_mask(image=image_resize,image_mask=predicted_images[prompt])
 
     if prompt not in detections.keys():
         print("prompt not in query ..")
-        return
-
-    cv2.rectangle(image_copy, (int(bbox[1]), int(bbox[0])), (int(bbox[3]), int(bbox[2])), (255, 0, 0), 2)
-    cv2.putText(image_copy,str(prompt),(int(bbox[1]), int(bbox[0])),cv2.FONT_HERSHEY_SIMPLEX, 2, 255)
-    final_image = cv2.addWeighted(image_copy,alpha,mask_image,1-alpha,0)
+        return image_resize
+    final_image = cv2.addWeighted(image_resize,alpha,mask_image,1-alpha,0)
     return final_image
 
 def shot(image, labels_text,selected_categoty):
     prompts = labels_text.split(',')
     prompts = list(map(lambda x: x.strip(),prompts))
-
     model_detections,predicted_images = detect_using_clip(image,prompts=prompts)
 
-    category_image = visualize_images(image=image,detections=model_detections,
+    category_image = visualize_images(image=image,detections=model_detections,predicted_images=predicted_images,prompt=selected_categoty)
+
     return category_image
 
 iface = gr.Interface(fn=shot,
```
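The hunk opens partway through `detect_using_clip`, so the code that produces `predicted_image` and `props` sits above the visible context. A minimal sketch of what that step plausibly looks like, assuming the Space uses the Hugging Face `transformers` CLIPSeg checkpoint (CLIPSeg's fixed 352×352 output would explain the hard-coded `cv2.resize(image,(352,352))` added to `visualize_images`); the `segment` helper and its internals are illustrative names, not taken from the repo:

```python
import torch
from skimage.measure import label, regionprops
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

# Hypothetical reconstruction of the inference step feeding detect_using_clip.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

def segment(image, prompts, threshold=0.4):
    # One copy of the image per text prompt: CLIPSeg scores each (image, text) pair.
    inputs = processor(text=prompts, images=[image] * len(prompts),
                       padding=True, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # The reshape covers the single-prompt case, where the logits come back 2-D.
    masks = torch.sigmoid(logits).cpu().numpy().reshape(len(prompts), 352, 352)
    for prompt, mask in zip(prompts, masks):
        # Connected components of the binarized mask give region proposals whose
        # .bbox matches the prop.bbox consumed by rescale_bbox in the hunk.
        props = regionprops(label(mask > threshold))
        yield prompt, mask, props
```

Under that reading, `predicted_images[prompt]= predicted_image` now stores the raw 352×352 mask, and `visualize_images` shrinks the display image to the mask's resolution instead of upscaling the mask as the removed `cv2.resize(np.expand_dims(...))` line did.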
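`create_mask` is called by the new `visualize_images` but not defined in this hunk. A minimal stand-in consistent with its call site, assuming it returns a 3-channel overlay with the same size and dtype as `image_resize` (which `cv2.addWeighted` requires of both inputs); the body, the 0.4 threshold (echoing the `threshould=0.4` default in the hunk header), and the highlight color are all assumptions:

```python
import numpy as np

def create_mask(image, image_mask, threshold=0.4):
    """Hypothetical stand-in: paint the thresholded mask onto a copy of `image`.

    Returns a 3-channel uint8 array shaped like `image`, so that
    cv2.addWeighted(image_resize, alpha, mask_image, 1 - alpha, 0) accepts it.
    """
    binary = image_mask > threshold   # binarize the 352x352 probability map
    overlay = image.copy()
    overlay[binary] = (255, 0, 0)     # flat highlight color on masked pixels
    return overlay
```

One thing worth flagging in the new code: the `create_mask` call runs before the `if prompt not in detections.keys()` guard, so a prompt missing from the dictionaries would raise a `KeyError` on `predicted_images[prompt]` before the guard can return `image_resize`.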