Commit 3caebd7
1 Parent(s): d1bffba
adding app with CLIP image segmentation
app.py CHANGED
@@ -29,6 +29,7 @@ def rescale_bbox(bbox,orig_image_shape=(1024,1024),model_shape=352):
     return [int(y1),int(x1),int(y2),int(x2)]
 
 def detect_using_clip(image,prompts=[],threshould=0.4):
+    h,w = image.shape[:2]
     model_detections = dict()
     predicted_images = dict()
     inputs = processor(
@@ -49,8 +50,12 @@ def detect_using_clip(image,prompts=[],threshould=0.4):
         lbl_0 = label(predicted_image)
         props = regionprops(lbl_0)
         prompt = prompt.lower()
+
         model_detections[prompt] = [rescale_bbox(prop.bbox,orig_image_shape=image.shape[:2],model_shape=predicted_image.shape[0]) for prop in props]
-
+        a = np.expand_dims(predicted_image,axis=-1)
+        print(a.shape,image.shape[:2])
+        predicted_images[prompt]= cv2.resize(np.expand_dims(predicted_image,axis=-1),(h,w),interpolation = cv2.INTER_LINEAR)
+
     return model_detections , predicted_images
 
 def visualize_images(image,detections,predicted_image,prompt):
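Note: the globals this function relies on are not shown in the diff. Given the commit title and the model_shape=352 default in rescale_bbox (CLIPSeg's output resolution), `processor` and the model producing each `predicted_image` are presumably CLIPSeg from transformers; the misspelled `threshould` parameter is then presumably the cutoff used to binarize the sigmoid maps before labelling. A minimal sketch of the assumed setup, with the CIDAS/clipseg-rd64-refined checkpoint as a guess:

import torch
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

# Assumed checkpoint; not confirmed by the diff.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

def segment(image, prompts):
    # One text prompt per copy of the image; the model returns one
    # 352x352 logit map per prompt.
    inputs = processor(text=prompts, images=[image] * len(prompts),
                       padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    return torch.sigmoid(outputs.logits)  # per-prompt heatmaps in [0, 1]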
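On the bounding-box step: skimage.measure.regionprops returns bbox as (min_row, min_col, max_row, max_col), i.e. (y1, x1, y2, x2), which matches the ordering rescale_bbox returns. The diff shows only that function's signature and return line; a plausible, purely illustrative sketch of the scaling it performs, mapping boxes from the 352x352 model grid back to the original resolution (not the committed body):

def rescale_bbox(bbox, orig_image_shape=(1024, 1024), model_shape=352):
    # regionprops bbox ordering: (min_row, min_col, max_row, max_col)
    y1, x1, y2, x2 = bbox
    y1, y2 = y1 * orig_image_shape[0] / model_shape, y2 * orig_image_shape[0] / model_shape  # rows scaled by H/352
    x1, x2 = x1 * orig_image_shape[1] / model_shape, x2 * orig_image_shape[1] / model_shape  # cols scaled by W/352
    return [int(y1),int(x1),int(y2),int(x2)]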
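One caution on the new resize line: cv2.resize takes dsize as (width, height), so passing (h,w) only lands on the right shape when the image is square; (w,h) is the safe ordering. The np.expand_dims also appears redundant, since OpenCV returns a 2-D array for single-channel input either way, and the `a`/print pair above it looks like leftover debugging. A standalone check with example, non-square values (not from the commit):

import cv2
import numpy as np

h, w = 768, 1024  # example original image size
predicted_image = np.random.rand(352, 352).astype(np.float32)  # stand-in for a CLIPSeg map

# dsize is (width, height), so pass (w, h) to recover the original shape.
mask = cv2.resize(predicted_image, (w, h), interpolation=cv2.INTER_LINEAR)
print(mask.shape)  # (768, 1024), i.e. (h, w) of the original image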