wondervictor committed
Commit 303156d · verified · 1 Parent(s): a0f5007

Update mask_adapter/sam_maskadapter.py

Files changed (1)
  1. mask_adapter/sam_maskadapter.py +87 -36
mask_adapter/sam_maskadapter.py CHANGED
@@ -19,6 +19,25 @@ from PIL import Image
 PIXEL_MEAN = [122.7709383, 116.7460125, 104.09373615]
 PIXEL_STD = [68.5005327, 66.6321579, 70.32316305]

+
+VILD_PROMPT = [
+    "a photo of a {}.",
+    "This is a photo of a {}",
+    "There is a {} in the scene",
+    "There is the {} in the scene",
+    "a photo of a {} in the scene",
+    "a photo of a small {}.",
+    "a photo of a medium {}.",
+    "a photo of a large {}.",
+    "This is a photo of a small {}.",
+    "This is a photo of a medium {}.",
+    "This is a photo of a large {}.",
+    "There is a small {} in the scene.",
+    "There is a medium {} in the scene.",
+    "There is a large {} in the scene.",
+]
+
+
 class OpenVocabVisualizer(Visualizer):
     def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE, class_names=None):
         super().__init__(img_rgb, metadata, scale, instance_mode)
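
Note: these 14 templates are applied to every class name to build a prompt ensemble; the commented-out code in run_on_image_with_boxes further down this diff averages their CLIP embeddings into one vector per class. For a single class the expansion looks like this (illustrative sketch, not part of the commit; the class name is hypothetical):

# Expand one class name with every template.
name = "traffic light"                        # hypothetical class name
prompts = [t.format(name) for t in VILD_PROMPT]
print(len(prompts))                           # 14
print(prompts[0])                             # a photo of a traffic light.
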
@@ -113,7 +132,7 @@ class SAMVisualizationDemo(object):

         return clip_vis_dense

-    def run_on_image(self, ori_image, class_names, text_features):
+    def run_on_image(self, ori_image, class_names):
         height, width, _ = ori_image.shape
         if width > height:
             new_width = 896
@@ -139,25 +158,25 @@ class SAMVisualizationDemo(object):
         image = (image - pixel_mean) / pixel_std

         image = image.unsqueeze(0)
-
-        image = image.to(text_features)
-        # if len(class_names) == 1:
-        #     class_names.append('others')
-        # txts = [f'a photo of {cls_name}' for cls_name in class_names]
-        # text = open_clip.tokenize(txts)
+
+        if len(class_names) == 1:
+            class_names.append('others')
+        txts = [f'a photo of {cls_name}' for cls_name in class_names]
+        text = open_clip.tokenize(txts)


         with torch.no_grad():
-            # text_features = self.clip_model.encode_text(text)
-            # text_features /= text_features.norm(dim=-1, keepdim=True)
+            self.clip_model.cuda()
+            text_features = self.clip_model.encode_text(text.cuda())
+            text_features /= text_features.norm(dim=-1, keepdim=True)

-            features = self.extract_features_convnext(image.float())
+            features = self.extract_features_convnext(image.cuda().float())

             clip_feature = features['clip_vis_dense']

             clip_vis_dense = self.visual_prediction_forward_convnext_2d(clip_feature)

-            semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).to(text_features).float())
+            semantic_activation_maps = self.mask_adapter(clip_vis_dense, pred_masks.tensor.unsqueeze(0).float().cuda())

             maps_for_pooling = F.interpolate(semantic_activation_maps, size=clip_feature.shape[-2:],
                                              mode='bilinear', align_corners=False)
@@ -188,7 +207,7 @@ class SAMVisualizationDemo(object):
                 select_mask.extend(locs[0].tolist())
         for idx in select_mask:
             select_cls[idx] = class_preds[idx]
-        semseg = torch.einsum("qc,qhw->chw", select_cls.float(), pred_masks.tensor.to(text_features).float())
+        semseg = torch.einsum("qc,qhw->chw", select_cls.float(), pred_masks.tensor.float().cuda())

         r = semseg
         blank_area = (r[0] == 0)
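
Note: the einsum above combines per-mask class scores with the stacked binary masks into a dense per-pixel score map: with Q masks, C classes and an H×W image, (Q, C) and (Q, H, W) contract over Q to give (C, H, W). A quick shape check with dummy tensors (illustrative only, not part of the commit):

import torch

Q, C, H, W = 5, 3, 4, 4                       # 5 masks, 3 classes, 4x4 image
select_cls = torch.rand(Q, C)                 # per-mask class scores
masks = (torch.rand(Q, H, W) > 0.5).float()   # one binary mask per proposal
semseg = torch.einsum("qc,qhw->chw", select_cls, masks)
print(semseg.shape)                           # torch.Size([3, 4, 4])
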
@@ -225,33 +244,16 @@ class SAMPointVisualizationDemo(object):
         self.clip_model = clip_model

         self.mask_adapter = mask_adapter
-
-
-        #from .data.datasets import openseg_classes
-
-        #COCO_CATEGORIES_pan = openseg_classes.get_coco_categories_with_prompt_eng()
-        #COCO_CATEGORIES_seg = openseg_classes.get_coco_stuff_categories_with_prompt_eng()
-
-        #thing_classes = [k["name"] for k in COCO_CATEGORIES_pan if k["isthing"] == 1]
-        #stuff_classes = [k["name"] for k in COCO_CATEGORIES_pan]
-        #print(coco_metadata)
-        #lvis_classes = open("./mask_adapter/data/datasets/lvis_1203_with_prompt_eng.txt", 'r').read().splitlines()
-        #lvis_classes = [x[x.find(':')+1:] for x in lvis_classes]
-
-        #self.class_names = thing_classes + stuff_classes + lvis_classes
-        #self.text_embedding = torch.from_numpy(np.load("./text_embedding/lvis_coco_text_embedding.npy"))

         self.class_names = self._load_class_names()

     def _load_class_names(self):
         from .data.datasets import openseg_classes
         COCO_CATEGORIES_pan = openseg_classes.get_coco_categories_with_prompt_eng()
-        thing_classes = [k["name"] for k in COCO_CATEGORIES_pan if k["isthing"] == 1]
         stuff_classes = [k["name"] for k in COCO_CATEGORIES_pan]
         ADE20K_150_CATEGORIES_ = openseg_classes.get_ade20k_categories_with_prompt_eng()
-        ade20k_thing_classes = [k["name"] for k in ADE20K_150_CATEGORIES_ if k["isthing"] == 1]
         ade20k_stuff_classes = [k["name"] for k in ADE20K_150_CATEGORIES_]
-        class_names = thing_classes + stuff_classes + ade20k_thing_classes+ ade20k_stuff_classes
+        class_names = stuff_classes + ade20k_stuff_classes #+ lvis_classes
         return [ class_name for class_name in class_names ]

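Note: the category names returned here come from the prompt-engineered COCO-panoptic and ADE20K lists, where a single entry can carry several comma-separated synonyms. The num_templates bookkeeping added to run_on_image_with_points and run_on_image_with_boxes below counts those synonyms per class. A small illustration with hypothetical entries (the real names are loaded from openseg_classes):

# Hypothetical prompt-engineered entries, for illustration only.
class_names = ["person", "sofa,couch,lounge", "tv,television,tv monitor"]

num_templates = []
for cls_name in class_names:
    parts = cls_name.replace(', ', ',').split(',')
    num_templates.append(len(parts))

print(num_templates)  # [1, 3, 3]
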
 
@@ -285,6 +287,12 @@ class SAMPointVisualizationDemo(object):
     def run_on_image_with_points(self, ori_image, points,text_features,class_names=None):
         if class_names != None:
             self.class_names = class_names
+        else:
+            num_templates = []
+            for cls_name in self.class_names:
+                cls_name = cls_name.replace(', ', ',').split(',')#[0]
+                num_templates.append(len(cls_name))
+
         height, width, _ = ori_image.shape

         image = ori_image
@@ -333,7 +341,18 @@ class SAMPointVisualizationDemo(object):
         pooled_clip_feature = (pooled_clip_feature.reshape(B, num_instances, 16, -1).mean(dim=-2).contiguous())

         class_preds = (100.0 * pooled_clip_feature @ text_features.T).softmax(dim=-1)
-        class_preds = class_preds.squeeze(0)
+
+        if class_names is None:
+            final_class_preds = []
+            cur_idx = 0
+            for num_t in num_templates:
+                final_class_preds.append(class_preds[:, :, cur_idx: cur_idx + num_t].max(-1).values)
+                cur_idx += num_t
+            final_class_preds = torch.stack(final_class_preds, dim=-1)
+
+            class_preds = final_class_preds.squeeze(0)
+        else:
+            class_preds = class_preds.squeeze(0)

         # Resize mask to match original image size
         pred_mask = cv2.resize(masks.squeeze(0), (width, height), interpolation=cv2.INTER_NEAREST)  # Resize mask to match original image size
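
Note: with the default vocabulary, the new branch assumes class_preds has one column per synonym (sum(num_templates) columns in total) and collapses each synonym group with a max, so every class keeps a single score. A standalone shape check with random scores (illustrative only, reusing the num_templates from the sketch above):

import torch

num_templates = [1, 3, 3]                    # synonyms per class, as in the earlier sketch
B, num_instances = 1, 2
class_preds = torch.rand(B, num_instances, sum(num_templates)).softmax(dim=-1)

final_class_preds = []
cur_idx = 0
for num_t in num_templates:
    final_class_preds.append(class_preds[:, :, cur_idx: cur_idx + num_t].max(-1).values)
    cur_idx += num_t
final_class_preds = torch.stack(final_class_preds, dim=-1)

print(final_class_preds.shape)               # torch.Size([1, 2, 3]): one score per class
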
@@ -364,7 +383,12 @@ class SAMPointVisualizationDemo(object):
     def run_on_image_with_boxes(self, ori_image, bbox,text_features,class_names=None):
         if class_names != None:
             self.class_names = class_names
-
+        else:
+            num_templates = []
+            for cls_name in self.class_names:
+                cls_name = cls_name.replace(', ', ',').split(',')#[0]
+                num_templates.append(len(cls_name))
+
         height, width, _ = ori_image.shape

         image = ori_image
@@ -387,12 +411,28 @@ class SAMPointVisualizationDemo(object):
         image = image.unsqueeze(0)

         # txts = [f'a photo of {cls_name}' for cls_name in self.class_names]
-        # text = open_clip.tokenize(txts)
+
+
+
+            # txts.append(f'a photo of {cls_name}')
+        #assert len(self.class_names) * 14 == len(txts)
+        #text = open_clip.tokenize(txts)

         with torch.no_grad():
-            # text_features = self.clip_model.encode_text(text.cuda())
+            # text_features = []
+            # bs = 128
+            # for idx in range(0, len(text), bs):
+            #     text_features.append(
+            #         self.clip_model.encode_text(text[idx:idx+bs].cuda())
+            #     )
+            # text_features = torch.cat(text_features, dim=0)
+
+            # #text_features = self.clip_model.encode_text(text.cuda())
             # text_features /= text_features.norm(dim=-1, keepdim=True)
-            #np.save("/home/yongkangli/Mask-Adapter/text_embedding/lvis_coco_text_embedding.npy", text_features.cpu().numpy())
+            # text_features = text_features.reshape(text_features.shape[0] // len(VILD_PROMPT), len(VILD_PROMPT), text_features.shape[-1]).mean(1)
+            # text_features /= text_features.norm(dim=-1, keepdim=True)
+            # print(text_features.shape)
+            # np.save("/data/yongkangli/Mask-Adapter-Demo/text_embedding/coco_ade20k_text_embedding_new.npy", text_features.cpu().numpy())
             #text_features = self.text_embedding.to(self.mask_adapter.device)
             features = self.extract_features_convnext(image.to(text_features).float())
             clip_feature = features['clip_vis_dense']
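
Note: the commented-out block above records how the cached text embedding was produced offline: encode every class-times-template prompt in batches, normalize, average over the 14 VILD_PROMPT templates, and save the result as an .npy file. A cleaned-up sketch of that recipe; clip_model, class_names, the batch size and the output path are assumptions or placeholders, not values taken from this repository:

import numpy as np
import torch
import open_clip

# Assumes `clip_model` (an open_clip model) and `class_names` are defined,
# and that VILD_PROMPT is the template list added at the top of this file.
prompts = [t.format(name) for name in class_names for t in VILD_PROMPT]
tokens = open_clip.tokenize(prompts)

bs = 128                                        # placeholder batch size
feats = []
with torch.no_grad():
    for idx in range(0, len(tokens), bs):       # encode in chunks to bound GPU memory
        feats.append(clip_model.encode_text(tokens[idx:idx + bs].cuda()))
    feats = torch.cat(feats, dim=0)
    feats = feats / feats.norm(dim=-1, keepdim=True)
    # Collapse the per-template rows of each class into one embedding.
    feats = feats.reshape(-1, len(VILD_PROMPT), feats.shape[-1]).mean(dim=1)
    feats = feats / feats.norm(dim=-1, keepdim=True)

np.save("text_embedding/coco_ade20k_text_embedding.npy", feats.cpu().numpy())  # placeholder path
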
@@ -411,7 +451,18 @@ class SAMPointVisualizationDemo(object):
         pooled_clip_feature = (pooled_clip_feature.reshape(B, num_instances, 16, -1).mean(dim=-2).contiguous())

         class_preds = (100.0 * pooled_clip_feature @ text_features.T).softmax(dim=-1)
-        class_preds = class_preds.squeeze(0)
+
+        if class_names is None:
+            final_class_preds = []
+            cur_idx = 0
+            for num_t in num_templates:
+                final_class_preds.append(class_preds[:, :, cur_idx: cur_idx + num_t].max(-1).values)
+                cur_idx += num_t
+            final_class_preds = torch.stack(final_class_preds, dim=-1)
+
+            class_preds = final_class_preds.squeeze(0)
+        else:
+            class_preds = class_preds.squeeze(0)

         # Resize mask to match original image size
         pred_mask = cv2.resize(masks.squeeze(0), (width, height), interpolation=cv2.INTER_NEAREST)  # Resize mask to match original image size
 