wondervictor committed
Commit d2690c4 · verified · 1 Parent(s): 1d8e8c2

Update mask_adapter/sam_maskadapter.py

Files changed (1)
  1. mask_adapter/sam_maskadapter.py +30 -31
mask_adapter/sam_maskadapter.py CHANGED

@@ -132,7 +132,7 @@ class SAMVisualizationDemo(object):
 
         return clip_vis_dense
 
-    def run_on_image(self, ori_image, class_names):
+    def run_on_image(self, ori_image, class_names, text_features):
        height, width, _ = ori_image.shape
        if width > height:
            new_width = 896
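Note: run_on_image no longer encodes text itself; callers must pass precomputed CLIP text features. A minimal caller-side sketch (not part of this commit; the demo and clip_model objects are assumptions, and the prompt template mirrors the code removed in the next hunk):

    import torch
    import open_clip

    class_names = ["cat", "dog"]  # example classes
    txts = [f'a photo of {cls_name}' for cls_name in class_names]
    text = open_clip.tokenize(txts)
    with torch.no_grad():
        text_features = clip_model.encode_text(text)  # clip_model: a loaded open_clip model
        text_features /= text_features.norm(dim=-1, keepdim=True)

    result = demo.run_on_image(ori_image, class_names, text_features)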
@@ -158,25 +158,25 @@ class SAMVisualizationDemo(object):
        image = (image - pixel_mean) / pixel_std
 
        image = image.unsqueeze(0)
-
-        if len(class_names) == 1:
-            class_names.append('others')
-        txts = [f'a photo of {cls_name}' for cls_name in class_names]
-        text = open_clip.tokenize(txts)
+
+        image = image.to(text_features)
+        # if len(class_names) == 1:
+        #     class_names.append('others')
+        # txts = [f'a photo of {cls_name}' for cls_name in class_names]
+        # text = open_clip.tokenize(txts)
 
 
        with torch.no_grad():
-            self.clip_model.cuda()
-            text_features = self.clip_model.encode_text(text.cuda())
-            text_features /= text_features.norm(dim=-1, keepdim=True)
+            # text_features = self.clip_model.encode_text(text)
+            # text_features /= text_features.norm(dim=-1, keepdim=True)
 
-            features = self.extract_features_convnext(image.cuda().float())
+            features = self.extract_features_convnext(image.float())
 
            clip_feature = features['clip_vis_dense']
 
            clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)
 
-            semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().cuda())
+            semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).to(text_features).float())
 
            maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:],
                                             mode='bilinear', align_corners=False)
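Note: the hard-coded .cuda() calls are replaced by Tensor.to(text_features), which casts a tensor to the device and dtype of another tensor, so the demo follows wherever the precomputed features live (CPU or GPU). A standalone illustration:

    import torch

    ref = torch.zeros(1, dtype=torch.float16)  # stand-in for text_features
    x = torch.randn(2, 3)                      # float32 on CPU

    y = x.to(ref)                              # adopts ref's dtype (and device)
    print(y.dtype)                             # torch.float16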
@@ -207,7 +207,7 @@ class SAMVisualizationDemo(object):
            select_mask.extend(locs[0].tolist())
        for idx in select_mask:
            select_cls[idx] = class_preds[idx]
-        semseg = torch.einsum("qc,qhw->chw", select_cls.float(), pred_masks.tensor.float().cuda())
+        semseg = torch.einsum("qc,qhw->chw", select_cls.float(), pred_masks.tensor.to(text_features).float())
 
        r = semseg
        blank_area = (r[0] == 0)
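Note: the einsum "qc,qhw->chw" contracts over the Q mask queries, weighting each mask by its per-class score and summing into a C x H x W semantic map; only the device handling changes in this hunk. A small shape check with illustrative sizes:

    import torch

    Q, C, H, W = 4, 3, 8, 8
    select_cls = torch.rand(Q, C)  # per-mask class scores
    masks = torch.rand(Q, H, W)    # per-mask spatial masks

    semseg = torch.einsum("qc,qhw->chw", select_cls, masks)
    # equivalent to: (select_cls[:, :, None, None] * masks[:, None]).sum(0)
    print(semseg.shape)            # torch.Size([3, 8, 8])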
@@ -244,6 +244,21 @@ class SAMPointVisualizationDemo(object):
        self.clip_model = clip_model
 
        self.mask_adapter = mask_adapter
+
+
+        #from .data.datasets import openseg_classes
+
+        #COCO_CATEGORIES_pan = openseg_classes.get_coco_categories_with_prompt_eng()
+        #COCO_CATEGORIES_seg = openseg_classes.get_coco_stuff_categories_with_prompt_eng()
+
+        #thing_classes = [k["name"] for k in COCO_CATEGORIES_pan if k["isthing"] == 1]
+        #stuff_classes = [k["name"] for k in COCO_CATEGORIES_pan]
+        #print(coco_metadata)
+        #lvis_classes = open("./mask_adapter/data/datasets/lvis_1203_with_prompt_eng.txt", 'r').read().splitlines()
+        #lvis_classes = [x[x.find(':')+1:] for x in lvis_classes]
+
+        #self.class_names = thing_classes + stuff_classes + lvis_classes
+        #self.text_embedding = torch.from_numpy(np.load("./text_embedding/lvis_coco_text_embedding.npy"))
 
        self.class_names = self._load_class_names()
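Note: the commented-out block added above documents how the class list and cached text embedding were once assembled. The save/load round trip it hints at, as a sketch (the path and surrounding variables are illustrative, not the repo's actual files):

    import numpy as np
    import torch

    # save once, after encoding and normalizing text features:
    np.save("text_embedding/example_text_embedding.npy", text_features.cpu().numpy())

    # later, skip the text encoder entirely:
    text_features = torch.from_numpy(np.load("text_embedding/example_text_embedding.npy"))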
@@ -411,28 +426,12 @@ class SAMPointVisualizationDemo(object):
        image = image.unsqueeze(0)
 
        # txts = [f'a photo of {cls_name}' for cls_name in self.class_names]
-
-
-
-        #     txts.append(f'a photo of {cls_name}')
-        #assert len(self.class_names) * 14 == len(txts)
-        #text = open_clip.tokenize(txts)
+        # text = open_clip.tokenize(txts)
 
        with torch.no_grad():
-            # text_features = []
-            # bs = 128
-            # for idx in range(0, len(text), bs):
-            #     text_features.append(
-            #         self.clip_model.encode_text(text[idx:idx+bs].cuda())
-            #     )
-            # text_features = torch.cat(text_features, dim=0)
-
-            # #text_features = self.clip_model.encode_text(text.cuda())
-            # text_features /= text_features.norm(dim=-1, keepdim=True)
-            # text_features = text_features.reshape(text_features.shape[0] // len(VILD_PROMPT), len(VILD_PROMPT), text_features.shape[-1]).mean(1)
+            # text_features = self.clip_model.encode_text(text.cuda())
            # text_features /= text_features.norm(dim=-1, keepdim=True)
-            # print(text_features.shape)
-            # np.save("/data/yongkangli/Mask-Adapter-Demo/text_embedding/coco_ade20k_text_embedding_new.npy", text_features.cpu().numpy())
+            #np.save("/home/yongkangli/Mask-Adapter/text_embedding/lvis_coco_text_embedding.npy", text_features.cpu().numpy())
            #text_features = self.text_embedding.to(self.mask_adapter.device)
            features = self.extract_features_convnext(image.to(text_features).float())
            clip_feature = features['clip_vis_dense']
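Note: the comments deleted in this hunk record how the cached embedding file was generated: tokenize every class under each prompt template, encode in batches, then average the per-template embeddings for each class. A hedged reconstruction of that pattern (VILD_PROMPT is shortened here; clip_model and class_names are assumed to exist):

    import torch
    import open_clip

    VILD_PROMPT = ["a photo of a {}.", "There is a {} in the scene."]  # illustrative subset

    txts = [t.format(c) for c in class_names for t in VILD_PROMPT]
    text = open_clip.tokenize(txts)

    with torch.no_grad():
        feats = []
        bs = 128
        for i in range(0, len(text), bs):
            feats.append(clip_model.encode_text(text[i:i + bs]))
        text_features = torch.cat(feats, dim=0)
        text_features /= text_features.norm(dim=-1, keepdim=True)
        # one embedding per class: mean over templates, then re-normalize
        text_features = text_features.reshape(len(class_names), len(VILD_PROMPT), -1).mean(1)
        text_features /= text_features.norm(dim=-1, keepdim=True)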
 