Update mask_adapter/sam_maskadapter.py
mask_adapter/sam_maskadapter.py
CHANGED
@@ -239,7 +239,7 @@ class SAMPointVisualizationDemo(object):
         lvis_classes = [x[x.find(':')+1:] for x in lvis_classes]
 
         self.class_names = thing_classes + stuff_classes + lvis_classes
-
+        self.text_embedding = torch.from_numpy(np.load("./text_embedding/lvis_coco_text_embedding.npy"))
 
         self.class_names = self._load_class_names()
 
@@ -280,7 +280,7 @@ class SAMPointVisualizationDemo(object):
 
         return clip_vis_dense
 
-    def run_on_image_with_points(self, ori_image, points
+    def run_on_image_with_points(self, ori_image, points):
         height, width, _ = ori_image.shape
 
         image = ori_image
 
@@ -311,7 +311,7 @@ class SAMPointVisualizationDemo(object):
         # text_features = self.clip_model.encode_text(text.cuda())
         # text_features /= text_features.norm(dim=-1, keepdim=True)
         # np.save("/home/yongkangli/Mask-Adapter/text_embedding/lvis_coco_text_embedding.npy", text_features.cpu().numpy())
-
+        text_features = self.text_embedding.to(self.clip_model.device)
         features = self.extract_features_convnext(image.to(self.clip_model.device).float())
         clip_feature = features['clip_vis_dense']
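Taken together, the change swaps per-request CLIP text encoding for a precomputed cache: the commented-out lines show how the embeddings were originally produced (encode_text followed by L2 normalization, then np.save), and the new code loads that .npy once in __init__ and only moves it to the model's device inside run_on_image_with_points. A minimal sketch of the same caching pattern, assuming a CLIP-style model whose encode_text takes tokenized prompts; the tokenizer argument, prompt template, and function names below are illustrative and not part of this repo:

    import numpy as np
    import torch

    def cache_text_embeddings(clip_model, tokenizer, class_names, path, device="cuda"):
        # One-time, expensive step: encode every class name and persist the
        # result. This is what the commit moves out of the per-image path.
        with torch.no_grad():
            tokens = tokenizer([f"a photo of a {c}" for c in class_names]).to(device)
            feats = clip_model.encode_text(tokens)
            # Same L2 normalization as the commented-out lines in the diff.
            feats = feats / feats.norm(dim=-1, keepdim=True)
        np.save(path, feats.cpu().numpy())

    def load_text_embeddings(path, device="cuda"):
        # Cheap at startup: a single disk read replaces a CLIP forward pass.
        return torch.from_numpy(np.load(path)).to(device)

The usual trade-off applies: the cached matrix is only valid for the vocabulary and CLIP checkpoint it was encoded from, so it must be regenerated whenever thing_classes, stuff_classes, or lvis_classes change.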