Update mask_adapter/sam_maskadapter.py
mask_adapter/sam_maskadapter.py
CHANGED
@@ -239,7 +239,7 @@ class SAMPointVisualizationDemo(object):
         lvis_classes = [x[x.find(':')+1:] for x in lvis_classes]
 
         self.class_names = thing_classes + stuff_classes + lvis_classes
-        self.text_embedding = torch.from_numpy(np.load("./text_embedding/lvis_coco_text_embedding.npy"))
+        #self.text_embedding = torch.from_numpy(np.load("./text_embedding/lvis_coco_text_embedding.npy"))
 
         self.class_names = self._load_class_names()
 
@@ -280,7 +280,7 @@ class SAMPointVisualizationDemo(object):
 
         return clip_vis_dense
 
-    def run_on_image_with_points(self, ori_image, points):
+    def run_on_image_with_points(self, ori_image, points, text_features):
         height, width, _ = ori_image.shape
 
         image = ori_image
@@ -311,13 +311,13 @@ class SAMPointVisualizationDemo(object):
         # text_features = self.clip_model.encode_text(text.cuda())
         # text_features /= text_features.norm(dim=-1, keepdim=True)
         #np.save("/home/yongkangli/Mask-Adapter/text_embedding/lvis_coco_text_embedding.npy", text_features.cpu().numpy())
-        text_features = self.text_embedding.to(self.mask_adapter.device)
-        features = self.extract_features_convnext(image.to(
+        #text_features = self.text_embedding.to(self.mask_adapter.device)
+        features = self.extract_features_convnext(image.to(text_features).float())
        clip_feature = features['clip_vis_dense']
 
         clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)
 
-        semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).to(
+        semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).to(text_features).float())
         maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:], mode='bilinear', align_corners=False)
 
         B, C = clip_feature.size(0), clip_feature.size(1)
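In effect, this commit stops loading the LVIS/COCO text embedding inside SAMPointVisualizationDemo (the np.load line is commented out) and instead threads a precomputed text_features tensor through run_on_image_with_points, using it as the target of Tensor.to() so the image and predicted masks are converted to the embedding's device and dtype in one call. A minimal calling sketch; the constructor call, image path, and click coordinates are illustrative assumptions, and only the method signature and the embedding path come from the diff:

import cv2
import numpy as np
import torch

# Precompute the text features once, outside the class, mirroring the load
# that the diff comments out inside SAMPointVisualizationDemo.
device = "cuda" if torch.cuda.is_available() else "cpu"
text_features = torch.from_numpy(
    np.load("./text_embedding/lvis_coco_text_embedding.npy")
).to(device).float()

demo = SAMPointVisualizationDemo()     # construct as elsewhere in the repo (hypothetical)
ori_image = cv2.imread("input.jpg")    # H x W x 3 array, as the method expects
points = [[320, 240]]                  # one (x, y) click, illustrative
result = demo.run_on_image_with_points(ori_image, points, text_features)

Inside the method, image.to(text_features) and pred_masks.tensor.unsqueeze(0).to(text_features) then land on the same device as the embedding, which presumably lets callers share one embedding tensor across demo instances instead of reloading the .npy file in each constructor.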