repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
DSLA-DSLA
DSLA-DSLA/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' conv_cfg = dict(type='ConvWS') norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( type='ResNeXt', depth=50, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, style='pytorch', conv_cfg=conv_cfg, norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws')))
544
27.684211
66
py
DSLA-DSLA
DSLA-DSLA/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' # model settings conv_cfg = dict(type='ConvWS') norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, style='pytorch', conv_cfg=conv_cfg, norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))
561
27.1
67
py
DSLA-DSLA
DSLA-DSLA/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws')))
207
28.714286
79
py
DSLA-DSLA
DSLA-DSLA/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py
_base_ = './mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py' # learning policy lr_config = dict(step=[20, 23]) runner = dict(type='EpochBasedRunner', max_epochs=24)
157
30.6
53
py
DSLA-DSLA
DSLA-DSLA/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws')))
209
29
79
py
DSLA-DSLA
DSLA-DSLA/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' conv_cfg = dict(type='ConvWS') norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( conv_cfg=conv_cfg, norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')), neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, conv_cfg=conv_cfg, norm_cfg=norm_cfg)))
577
33
78
py
DSLA-DSLA
DSLA-DSLA/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' conv_cfg = dict(type='ConvWS') norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, style='pytorch', conv_cfg=conv_cfg, norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))
546
27.789474
67
py
DSLA-DSLA
DSLA-DSLA/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py
_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' # model settings conv_cfg = dict(type='ConvWS') norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( type='ResNeXt', depth=50, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, style='pytorch', conv_cfg=conv_cfg, norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws')))
559
27
66
py
DSLA-DSLA
DSLA-DSLA/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
_base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py' # learning policy lr_config = dict(step=[20, 23]) runner = dict(type='EpochBasedRunner', max_epochs=24)
163
31.8
58
py
DSLA-DSLA
DSLA-DSLA/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' conv_cfg = dict(type='ConvWS') norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( conv_cfg=conv_cfg, norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')), neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, conv_cfg=conv_cfg, norm_cfg=norm_cfg), mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg))) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
739
34.238095
78
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py
_base_ = './ga_faster_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
419
27
76
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py
_base_ = './ga_faster_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
419
27
76
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py
_base_ = './ga_retinanet_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
422
27.2
76
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_faster_r50_fpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( rpn_head=dict( _delete_=True, type='GARPNHead', in_channels=256, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[8], strides=[4, 8, 16, 32, 64]), anchor_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.07, 0.07, 0.14, 0.14]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.07, 0.07, 0.11, 0.11]), loc_filter_thr=0.01, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), roi_head=dict( bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), # model training and testing settings train_cfg=dict( rpn=dict( ga_assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, ignore_iof_thr=-1), ga_sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=-1, center_ratio=0.2, ignore_ratio=0.5), rpn_proposal=dict(nms_post=1000, max_per_img=300), rcnn=dict( assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), sampler=dict(type='RandomSampler', num=256))), test_cfg=dict( rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3))) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
2,402
35.409091
77
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py
_base_ = './ga_rpn_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
416
26.8
76
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py
_base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py' # model settings model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
236
25.333333
67
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py
_base_ = './ga_retinanet_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
422
27.2
76
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py
_base_ = './ga_faster_r50_caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
222
26.875
67
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py
_base_ = './ga_rpn_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
416
26.8
76
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_rpn_r50_fpn_1x_coco.py
_base_ = '../rpn/rpn_r50_fpn_1x_coco.py' model = dict( rpn_head=dict( _delete_=True, type='GARPNHead', in_channels=256, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[8], strides=[4, 8, 16, 32, 64]), anchor_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.07, 0.07, 0.14, 0.14]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.07, 0.07, 0.11, 0.11]), loc_filter_thr=0.01, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), # model training and testing settings train_cfg=dict( rpn=dict( ga_assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, ignore_iof_thr=-1), ga_sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=-1, center_ratio=0.2, ignore_ratio=0.5)), test_cfg=dict(rpn=dict(nms_post=1000))) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
2,022
33.288136
74
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe')), roi_head=dict( bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), # model training and testing settings train_cfg=dict( rcnn=dict( assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), sampler=dict(num=256))), test_cfg=dict(rcnn=dict(score_thr=1e-3))) dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadProposals', num_max_proposals=300), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadProposals', num_max_proposals=None), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img', 'proposals']), ]) ] data = dict( train=dict( proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl', pipeline=train_pipeline), val=dict( proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl', pipeline=test_pipeline), test=dict( proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl', pipeline=test_pipeline)) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
2,407
35.484848
78
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' model = dict( rpn_head=dict( _delete_=True, type='GARPNHead', in_channels=256, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[8], strides=[4, 8, 16, 32, 64]), anchor_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.07, 0.07, 0.14, 0.14]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.07, 0.07, 0.11, 0.11]), loc_filter_thr=0.01, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), roi_head=dict( bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), # model training and testing settings train_cfg=dict( rpn=dict( ga_assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, ignore_iof_thr=-1), ga_sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=-1, center_ratio=0.2, ignore_ratio=0.5), rpn_proposal=dict(nms_post=1000, max_per_img=300), rcnn=dict( assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), sampler=dict(type='RandomSampler', num=256))), test_cfg=dict( rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3))) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
2,408
35.5
77
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' model = dict( bbox_head=dict( _delete_=True, type='GARetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), anchor_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loc_filter_thr=0.01, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)), # training and testing settings train_cfg=dict( ga_assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.4, ignore_iof_thr=-1), ga_sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0), center_ratio=0.2, ignore_ratio=0.5)) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
2,049
31.539683
74
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py
_base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')))
225
27.25
67
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py
_base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py' model = dict( bbox_head=dict( _delete_=True, type='GARetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), anchor_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loc_filter_thr=0.01, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)), # training and testing settings train_cfg=dict( ga_assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.4, ignore_iof_thr=-1), ga_sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0), center_ratio=0.2, ignore_ratio=0.5)) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
2,055
31.634921
74
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py
_base_ = '../_base_/default_runtime.py' # model settings model = dict( type='RetinaNet', backbone=dict( type='ResNet', depth=101, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet101_caffe')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs=True, num_outs=5), bbox_head=dict( type='GARetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), anchor_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loc_filter_thr=0.01, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0))) # training and testing settings train_cfg = dict( ga_assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.4, ignore_iof_thr=-1), ga_sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, center_ratio=0.2, ignore_ratio=0.5, debug=False) test_cfg = dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 480), (1333, 960)], keep_ratio=True, multiscale_mode='range'), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='bbox') # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[16, 22]) checkpoint_config = dict(interval=1) # yapf:disable log_config = dict( interval=50, hooks=[ dict(type='TextLoggerHook'), # dict(type='TensorboardLoggerHook') ]) # yapf:enable # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=24)
5,095
28.976471
74
py
DSLA-DSLA
DSLA-DSLA/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py
_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py' model = dict( rpn_head=dict( _delete_=True, type='GARPNHead', in_channels=256, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[8], strides=[4, 8, 16, 32, 64]), anchor_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.07, 0.07, 0.14, 0.14]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.07, 0.07, 0.11, 0.11]), loc_filter_thr=0.01, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), # model training and testing settings train_cfg=dict( rpn=dict( ga_assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, ignore_iof_thr=-1), ga_sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=-1, center_ratio=0.2, ignore_ratio=0.5)), test_cfg=dict(rpn=dict(nms_post=1000))) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
2,028
33.389831
74
py
DSLA-DSLA
DSLA-DSLA/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa model = dict( type='LAD', # student backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), bbox_head=dict( type='LADHead', reg_decoded_bbox=True, score_voting=True, topk=9, num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), # teacher teacher_ckpt=teacher_ckpt, teacher_backbone=dict( type='ResNet', depth=101, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch'), teacher_neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), teacher_bbox_head=dict( type='LADHead', reg_decoded_bbox=True, score_voting=True, topk=9, num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), # training and testing settings train_cfg=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.1, neg_iou_thr=0.1, min_pos_iou=0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, score_voting=True, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) data = dict(samples_per_gpu=8, workers_per_gpu=4) optimizer = dict(lr=0.01) fp16 = dict(loss_scale=512.)
3,719
29.743802
139
py
DSLA-DSLA
DSLA-DSLA/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa model = dict( type='LAD', # student backbone=dict( type='ResNet', depth=101, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), bbox_head=dict( type='LADHead', reg_decoded_bbox=True, score_voting=True, topk=9, num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), # teacher teacher_ckpt=teacher_ckpt, teacher_backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch'), teacher_neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5), teacher_bbox_head=dict( type='LADHead', reg_decoded_bbox=True, score_voting=True, topk=9, num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.3), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), # training and testing settings train_cfg=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.1, neg_iou_thr=0.1, min_pos_iou=0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, score_voting=True, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) data = dict(samples_per_gpu=8, workers_per_gpu=4) optimizer = dict(lr=0.01) fp16 = dict(loss_scale=512.)
3,741
29.672131
138
py
DSLA-DSLA
DSLA-DSLA/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR. train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] data = dict(train=dict(pipeline=train_pipeline))
2,191
40.358491
78
py
DSLA-DSLA
DSLA-DSLA/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py
_base_ = './sparse_rcnn_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, value) for value in min_values], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] data = dict(train=dict(pipeline=train_pipeline)) lr_config = dict(policy='step', step=[27, 33]) runner = dict(type='EpochBasedRunner', max_epochs=36)
853
34.583333
77
py
DSLA-DSLA
DSLA-DSLA/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py
_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
216
26.125
61
py
DSLA-DSLA
DSLA-DSLA/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
_base_ = './sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
235
28.5
78
py
DSLA-DSLA
DSLA-DSLA/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] num_stages = 6 num_proposals = 100 model = dict( type='SparseRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=0, add_extra_convs='on_input', num_outs=4), rpn_head=dict( type='EmbeddingRPNHead', num_proposals=num_proposals, proposal_feature_channel=256), roi_head=dict( type='SparseRoIHead', num_stages=num_stages, stage_loss_weights=[1] * num_stages, proposal_feature_channel=256, bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='DIIHead', num_classes=80, num_ffn_fcs=2, num_heads=8, num_cls_fcs=1, num_reg_fcs=3, feedforward_channels=2048, in_channels=256, dropout=0.0, ffn_act_cfg=dict(type='ReLU', inplace=True), dynamic_conv_cfg=dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=7, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0), bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=False, target_means=[0., 0., 0., 0.], target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) ]), # training and testing settings train_cfg=dict( rpn=None, rcnn=[ dict( assigner=dict( type='HungarianAssigner', cls_cost=dict(type='FocalLossCost', weight=2.0), reg_cost=dict(type='BBoxL1Cost', weight=5.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0)), sampler=dict(type='PseudoSampler'), pos_weight=1) for _ in range(num_stages) ]), test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals))) # optimizer optimizer = dict(_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001) optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2)) # learning policy lr_config = dict(policy='step', step=[8, 11]) runner = dict(type='EpochBasedRunner', max_epochs=12)
3,469
35.145833
79
py
DSLA-DSLA
DSLA-DSLA/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/cityscapes_detection.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict(init_cfg=None), roi_head=dict( bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))) # optimizer # lr is set for a batch size of 8 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, # [7] yields higher performance than [6] step=[7]) runner = dict( type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64 log_config = dict(interval=100) # For better, more stable performance initialize from COCO load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
1,462
35.575
159
py
DSLA-DSLA
DSLA-DSLA/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict(init_cfg=None), roi_head=dict( bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=8, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) # optimizer # lr is set for a batch size of 8 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, # [7] yields higher performance than [6] step=[7]) runner = dict( type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64 log_config = dict(interval=100) # For better, more stable performance initialize from COCO load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
1,724
35.702128
153
py
DSLA-DSLA
DSLA-DSLA/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15))) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=15)
351
31
78
py
DSLA-DSLA
DSLA-DSLA/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py
_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' # learning policy lr_config = dict(step=[28, 34]) runner = dict(type='EpochBasedRunner', max_epochs=36)
154
24.833333
53
py
DSLA-DSLA
DSLA-DSLA/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py
_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron/resnet101_gn')))
219
26.5
63
py
DSLA-DSLA
DSLA-DSLA/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron/resnet50_gn')), neck=dict(norm_cfg=norm_cfg), roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg))) img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
1,755
34.12
77
py
DSLA-DSLA
DSLA-DSLA/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( norm_cfg=norm_cfg, init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')), neck=dict(norm_cfg=norm_cfg), roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg))) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
613
33.111111
79
py
DSLA-DSLA
DSLA-DSLA/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py
_base_ = './mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py' # learning policy lr_config = dict(step=[28, 34]) runner = dict(type='EpochBasedRunner', max_epochs=36)
162
26.166667
56
py
DSLA-DSLA
DSLA-DSLA/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py
_base_ = './mask_rcnn_r101_fpn_gn-all_2x_coco.py' # learning policy lr_config = dict(step=[28, 34]) runner = dict(type='EpochBasedRunner', max_epochs=36)
155
25
53
py
DSLA-DSLA
DSLA-DSLA/docs/en/stat.py
#!/usr/bin/env python import functools as func import glob import os.path as osp import re import numpy as np url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/' files = sorted(glob.glob('../configs/*/README.md')) stats = [] titles = [] num_ckpts = 0 for f in files: url = osp.dirname(f.replace('../', url_prefix)) with open(f, 'r') as content_file: content = content_file.read() title = content.split('\n')[0].replace('# ', '').strip() ckpts = set(x.lower().strip() for x in re.findall(r'\[model\]\((https?.*)\)', content)) if len(ckpts) == 0: continue _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] assert len(_papertype) > 0 papertype = _papertype[0] paper = set([(papertype, title)]) titles.append(title) num_ckpts += len(ckpts) statsmsg = f""" \t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) """ stats.append((paper, ckpts, statsmsg)) allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) msglist = '\n'.join(x for _, _, x in stats) papertypes, papercounts = np.unique([t for t, _ in allpapers], return_counts=True) countstr = '\n'.join( [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) modelzoo = f""" # Model Zoo Statistics * Number of papers: {len(set(titles))} {countstr} * Number of checkpoints: {num_ckpts} {msglist} """ with open('modelzoo_statistics.md', 'w') as f: f.write(modelzoo)
1,519
22.384615
74
py
DSLA-DSLA
DSLA-DSLA/docs/en/conf.py
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import subprocess import sys import pytorch_sphinx_theme sys.path.insert(0, os.path.abspath('../..')) # -- Project information ----------------------------------------------------- project = 'MMDetection' copyright = '2018-2021, OpenMMLab' author = 'MMDetection Authors' version_file = '../../mmdet/version.py' def get_version(): with open(version_file, 'r') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] # The full version, including alpha/beta/rc tags release = get_version() # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'recommonmark', 'sphinx_markdown_tables', 'sphinx_copybutton', ] autodoc_mock_imports = [ 'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = { '.rst': 'restructuredtext', '.md': 'markdown', } # The master toctree document. master_doc = 'index' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'sphinx_rtd_theme' html_theme = 'pytorch_sphinx_theme' html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] html_theme_options = { 'menu': [ { 'name': 'GitHub', 'url': 'https://github.com/open-mmlab/mmdetection' }, ], # Specify the language of shared menu 'menu_lang': 'en' } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_css_files = ['css/readthedocs.css'] # -- Extension configuration ------------------------------------------------- # Ignore >>> when copying code copybutton_prompt_text = r'>>> |\.\.\. ' copybutton_prompt_is_regexp = True def builder_inited_handler(app): subprocess.run(['./stat.py']) def setup(app): app.connect('builder-inited', builder_inited_handler)
3,373
28.596491
79
py
DSLA-DSLA
DSLA-DSLA/docs/zh_cn/stat.py
#!/usr/bin/env python import functools as func import glob import os.path as osp import re import numpy as np url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/' files = sorted(glob.glob('../configs/*/README.md')) stats = [] titles = [] num_ckpts = 0 for f in files: url = osp.dirname(f.replace('../', url_prefix)) with open(f, 'r') as content_file: content = content_file.read() title = content.split('\n')[0].replace('# ', '').strip() ckpts = set(x.lower().strip() for x in re.findall(r'\[model\]\((https?.*)\)', content)) if len(ckpts) == 0: continue _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] assert len(_papertype) > 0 papertype = _papertype[0] paper = set([(papertype, title)]) titles.append(title) num_ckpts += len(ckpts) statsmsg = f""" \t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) """ stats.append((paper, ckpts, statsmsg)) allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) msglist = '\n'.join(x for _, _, x in stats) papertypes, papercounts = np.unique([t for t, _ in allpapers], return_counts=True) countstr = '\n'.join( [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) modelzoo = f""" # Model Zoo Statistics * Number of papers: {len(set(titles))} {countstr} * Number of checkpoints: {num_ckpts} {msglist} """ with open('modelzoo_statistics.md', 'w') as f: f.write(modelzoo)
1,519
22.384615
74
py
DSLA-DSLA
DSLA-DSLA/docs/zh_cn/conf.py
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import subprocess import sys import pytorch_sphinx_theme sys.path.insert(0, os.path.abspath('../../')) # -- Project information ----------------------------------------------------- project = 'MMDetection' copyright = '2018-2021, OpenMMLab' author = 'MMDetection Authors' version_file = '../../mmdet/version.py' def get_version(): with open(version_file, 'r') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] # The full version, including alpha/beta/rc tags release = get_version() # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'recommonmark', 'sphinx_markdown_tables', 'sphinx_copybutton', ] autodoc_mock_imports = [ 'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = { '.rst': 'restructuredtext', '.md': 'markdown', } # The master toctree document. master_doc = 'index' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'sphinx_rtd_theme' html_theme = 'pytorch_sphinx_theme' html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] html_theme_options = { 'menu': [ { 'name': 'GitHub', 'url': 'https://github.com/open-mmlab/mmdetection' }, ], # Specify the language of shared menu 'menu_lang': 'cn', } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_css_files = ['css/readthedocs.css'] language = 'zh_CN' # -- Extension configuration ------------------------------------------------- # Ignore >>> when copying code copybutton_prompt_text = r'>>> |\.\.\. ' copybutton_prompt_is_regexp = True def builder_inited_handler(app): subprocess.run(['./stat.py']) def setup(app): app.connect('builder-inited', builder_inited_handler)
3,395
28.275862
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/version.py
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '2.20.0' short_version = __version__ def parse_version_info(version_str): version_info = [] for x in version_str.split('.'): if x.isdigit(): version_info.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') version_info.append(int(patch_version[0])) version_info.append(f'rc{patch_version[1]}') return tuple(version_info) version_info = parse_version_info(__version__)
529
25.5
56
py
DSLA-DSLA
DSLA-DSLA/mmdet/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') digit_version.append(int(patch_version[0]) - 1) digit_version.append(int(patch_version[1])) return digit_version mmcv_minimum_version = '1.3.17' mmcv_maximum_version = '1.5.0' mmcv_version = digit_version(mmcv.__version__) assert (mmcv_version >= digit_version(mmcv_minimum_version) and mmcv_version <= digit_version(mmcv_maximum_version)), \ f'MMCV=={mmcv.__version__} is used but incompatible. ' \ f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' __all__ = ['__version__', 'short_version']
909
29.333333
77
py
DSLA-DSLA
DSLA-DSLA/mmdet/apis/inference.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import mmcv import numpy as np import torch from mmcv.ops import RoIPool from mmcv.parallel import collate, scatter from mmcv.runner import load_checkpoint from mmdet.core import get_classes from mmdet.datasets import replace_ImageToTensor from mmdet.datasets.pipelines import Compose from mmdet.models import build_detector def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None): """Initialize a detector from config file. Args: config (str or :obj:`mmcv.Config`): Config file path or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. cfg_options (dict): Options to override some settings in the used config. Returns: nn.Module: The constructed detector. """ if isinstance(config, str): config = mmcv.Config.fromfile(config) elif not isinstance(config, mmcv.Config): raise TypeError('config must be a filename or Config object, ' f'but got {type(config)}') if cfg_options is not None: config.merge_from_dict(cfg_options) config.model.pretrained = None config.model.train_cfg = None model = build_detector(config.model, test_cfg=config.get('test_cfg')) if checkpoint is not None: checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') if 'CLASSES' in checkpoint.get('meta', {}): model.CLASSES = checkpoint['meta']['CLASSES'] else: warnings.simplefilter('once') warnings.warn('Class names are not saved in the checkpoint\'s ' 'meta data, use COCO classes by default.') model.CLASSES = get_classes('coco') model.cfg = config # save the config in the model for convenience model.to(device) model.eval() return model class LoadImage: """Deprecated. A simple pipeline to load image. """ def __call__(self, results): """Call function to load images into results. Args: results (dict): A result dict contains the file name of the image to be read. Returns: dict: ``results`` will be returned containing loaded image. """ warnings.simplefilter('once') warnings.warn('`LoadImage` is deprecated and will be removed in ' 'future releases. You may use `LoadImageFromWebcam` ' 'from `mmdet.datasets.pipelines.` instead.') if isinstance(results['img'], str): results['filename'] = results['img'] results['ori_filename'] = results['img'] else: results['filename'] = None results['ori_filename'] = None img = mmcv.imread(results['img']) results['img'] = img results['img_fields'] = ['img'] results['img_shape'] = img.shape results['ori_shape'] = img.shape return results def inference_detector(model, imgs): """Inference image(s) with the detector. Args: model (nn.Module): The loaded detector. imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]): Either image files or loaded images. Returns: If imgs is a list or tuple, the same length list type results will be returned, otherwise return the detection results directly. """ if isinstance(imgs, (list, tuple)): is_batch = True else: imgs = [imgs] is_batch = False cfg = model.cfg device = next(model.parameters()).device # model device if isinstance(imgs[0], np.ndarray): cfg = cfg.copy() # set loading pipeline type cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) test_pipeline = Compose(cfg.data.test.pipeline) datas = [] for img in imgs: # prepare data if isinstance(img, np.ndarray): # directly add img data = dict(img=img) else: # add information into dict data = dict(img_info=dict(filename=img), img_prefix=None) # build the data pipeline data = test_pipeline(data) datas.append(data) data = collate(datas, samples_per_gpu=len(imgs)) # just get the actual data from DataContainer data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] data['img'] = [img.data[0] for img in data['img']] if next(model.parameters()).is_cuda: # scatter to specified GPU data = scatter(data, [device])[0] else: for m in model.modules(): assert not isinstance( m, RoIPool ), 'CPU inference with RoIPool is not supported currently.' # forward the model with torch.no_grad(): results = model(return_loss=False, rescale=True, **data) if not is_batch: return results[0] else: return results async def async_inference_detector(model, imgs): """Async inference image(s) with the detector. Args: model (nn.Module): The loaded detector. img (str | ndarray): Either image files or loaded images. Returns: Awaitable detection results. """ if not isinstance(imgs, (list, tuple)): imgs = [imgs] cfg = model.cfg device = next(model.parameters()).device # model device if isinstance(imgs[0], np.ndarray): cfg = cfg.copy() # set loading pipeline type cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) test_pipeline = Compose(cfg.data.test.pipeline) datas = [] for img in imgs: # prepare data if isinstance(img, np.ndarray): # directly add img data = dict(img=img) else: # add information into dict data = dict(img_info=dict(filename=img), img_prefix=None) # build the data pipeline data = test_pipeline(data) datas.append(data) data = collate(datas, samples_per_gpu=len(imgs)) # just get the actual data from DataContainer data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] data['img'] = [img.data[0] for img in data['img']] if next(model.parameters()).is_cuda: # scatter to specified GPU data = scatter(data, [device])[0] else: for m in model.modules(): assert not isinstance( m, RoIPool ), 'CPU inference with RoIPool is not supported currently.' # We don't restore `torch.is_grad_enabled()` value during concurrent # inference since execution can overlap torch.set_grad_enabled(False) results = await model.aforward_test(rescale=True, **data) return results def show_result_pyplot(model, img, result, score_thr=0.3, title='result', wait_time=0): """Visualize the detection results on the image. Args: model (nn.Module): The loaded detector. img (str or np.ndarray): Image filename or loaded image. result (tuple[list] or list): The detection result, can be either (bbox, segm) or just bbox. score_thr (float): The threshold to visualize the bboxes and masks. title (str): Title of the pyplot figure. wait_time (float): Value of waitKey param. Default: 0. """ if hasattr(model, 'module'): model = model.module model.show_result( img, result, score_thr=score_thr, show=True, wait_time=wait_time, win_name=title, bbox_color=(72, 101, 241), text_color=(72, 101, 241))
7,980
32.116183
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/apis/test.py
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import pickle import shutil import tempfile import time import mmcv import torch import torch.distributed as dist from mmcv.image import tensor2imgs from mmcv.runner import get_dist_info from mmdet.core import encode_mask_results def single_gpu_test(model, data_loader, show=False, out_dir=None, show_score_thr=0.3): model.eval() results = [] dataset = data_loader.dataset prog_bar = mmcv.ProgressBar(len(dataset)) for i, data in enumerate(data_loader): with torch.no_grad(): result = model(return_loss=False, rescale=True, **data) batch_size = len(result) if show or out_dir: if batch_size == 1 and isinstance(data['img'][0], torch.Tensor): img_tensor = data['img'][0] else: img_tensor = data['img'][0].data[0] img_metas = data['img_metas'][0].data[0] imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) assert len(imgs) == len(img_metas) for i, (img, img_meta) in enumerate(zip(imgs, img_metas)): h, w, _ = img_meta['img_shape'] img_show = img[:h, :w, :] ori_h, ori_w = img_meta['ori_shape'][:-1] img_show = mmcv.imresize(img_show, (ori_w, ori_h)) if out_dir: out_file = osp.join(out_dir, img_meta['ori_filename']) else: out_file = None model.module.show_result( img_show, result[i], show=show, out_file=out_file, score_thr=show_score_thr) # encode mask results if isinstance(result[0], tuple): result = [(bbox_results, encode_mask_results(mask_results)) for bbox_results, mask_results in result] results.extend(result) for _ in range(batch_size): prog_bar.update() return results def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): """Test model with multiple gpus. This method tests model with multiple gpus and collects the results under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' it encodes results to gpu tensors and use gpu communication for results collection. On cpu mode it saves the results on different gpus to 'tmpdir' and collects them by the rank 0 worker. Args: model (nn.Module): Model to be tested. data_loader (nn.Dataloader): Pytorch data loader. tmpdir (str): Path of directory to save the temporary results from different gpus under cpu mode. gpu_collect (bool): Option to use either gpu or cpu to collect results. Returns: list: The prediction results. """ model.eval() results = [] dataset = data_loader.dataset rank, world_size = get_dist_info() if rank == 0: prog_bar = mmcv.ProgressBar(len(dataset)) time.sleep(2) # This line can prevent deadlock problem in some cases. for i, data in enumerate(data_loader): with torch.no_grad(): result = model(return_loss=False, rescale=True, **data) # encode mask results if isinstance(result[0], tuple): result = [(bbox_results, encode_mask_results(mask_results)) for bbox_results, mask_results in result] results.extend(result) if rank == 0: batch_size = len(result) for _ in range(batch_size * world_size): prog_bar.update() # collect results from all ranks if gpu_collect: results = collect_results_gpu(results, len(dataset)) else: results = collect_results_cpu(results, len(dataset), tmpdir) return results def collect_results_cpu(result_part, size, tmpdir=None): rank, world_size = get_dist_info() # create a tmp dir if it is not specified if tmpdir is None: MAX_LEN = 512 # 32 is whitespace dir_tensor = torch.full((MAX_LEN, ), 32, dtype=torch.uint8, device='cuda') if rank == 0: mmcv.mkdir_or_exist('.dist_test') tmpdir = tempfile.mkdtemp(dir='.dist_test') tmpdir = torch.tensor( bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') dir_tensor[:len(tmpdir)] = tmpdir dist.broadcast(dir_tensor, 0) tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() else: mmcv.mkdir_or_exist(tmpdir) # dump the part result to the dir mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) dist.barrier() # collect all parts if rank != 0: return None else: # load results of all parts from tmp dir part_list = [] for i in range(world_size): part_file = osp.join(tmpdir, f'part_{i}.pkl') part_list.append(mmcv.load(part_file)) # sort the results ordered_results = [] for res in zip(*part_list): ordered_results.extend(list(res)) # the dataloader may pad some samples ordered_results = ordered_results[:size] # remove tmp dir shutil.rmtree(tmpdir) return ordered_results def collect_results_gpu(result_part, size): rank, world_size = get_dist_info() # dump result part to tensor with pickle part_tensor = torch.tensor( bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') # gather all result part tensor shape shape_tensor = torch.tensor(part_tensor.shape, device='cuda') shape_list = [shape_tensor.clone() for _ in range(world_size)] dist.all_gather(shape_list, shape_tensor) # padding result part tensor to max length shape_max = torch.tensor(shape_list).max() part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') part_send[:shape_tensor[0]] = part_tensor part_recv_list = [ part_tensor.new_zeros(shape_max) for _ in range(world_size) ] # gather all result part dist.all_gather(part_recv_list, part_send) if rank == 0: part_list = [] for recv, shape in zip(part_recv_list, shape_list): part_list.append( pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) # sort the results ordered_results = [] for res in zip(*part_list): ordered_results.extend(list(res)) # the dataloader may pad some samples ordered_results = ordered_results[:size] return ordered_results
6,874
34.807292
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/apis/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .inference import (async_inference_detector, inference_detector, init_detector, show_result_pyplot) from .test import multi_gpu_test, single_gpu_test from .train import (get_root_logger, init_random_seed, set_random_seed, train_detector) __all__ = [ 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', 'async_inference_detector', 'inference_detector', 'show_result_pyplot', 'multi_gpu_test', 'single_gpu_test', 'init_random_seed' ]
563
42.384615
76
py
DSLA-DSLA
DSLA-DSLA/mmdet/apis/train.py
# Copyright (c) OpenMMLab. All rights reserved. import random import warnings import numpy as np import torch import torch.distributed as dist from mmcv.parallel import MMDataParallel, MMDistributedDataParallel from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, Fp16OptimizerHook, OptimizerHook, build_optimizer, build_runner, get_dist_info) from mmdet.core import DistEvalHook, EvalHook from mmdet.datasets import (build_dataloader, build_dataset, replace_ImageToTensor) from mmdet.utils import find_latest_checkpoint, get_root_logger def init_random_seed(seed=None, device='cuda'): """Initialize random seed. If the seed is not set, the seed will be automatically randomized, and then broadcast to all processes to prevent some potential bugs. Args: seed (int, Optional): The seed. Default to None. device (str): The device where the seed will be put on. Default to 'cuda'. Returns: int: Seed to be used. """ if seed is not None: return seed # Make sure all ranks share the same random seed to prevent # some potential bugs. Please refer to # https://github.com/open-mmlab/mmdetection/issues/6339 rank, world_size = get_dist_info() seed = np.random.randint(2**31) if world_size == 1: return seed if rank == 0: random_num = torch.tensor(seed, dtype=torch.int32, device=device) else: random_num = torch.tensor(0, dtype=torch.int32, device=device) dist.broadcast(random_num, src=0) return random_num.item() def set_random_seed(seed, deterministic=False): """Set random seed. Args: seed (int): Seed to be used. deterministic (bool): Whether to set the deterministic option for CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` to True and `torch.backends.cudnn.benchmark` to False. Default: False. """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) if deterministic: torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None): logger = get_root_logger(log_level=cfg.log_level) # prepare data loaders dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] if 'imgs_per_gpu' in cfg.data: logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. ' 'Please use "samples_per_gpu" instead') if 'samples_per_gpu' in cfg.data: logger.warning( f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and ' f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"' f'={cfg.data.imgs_per_gpu} is used in this experiments') else: logger.warning( 'Automatically set "samples_per_gpu"="imgs_per_gpu"=' f'{cfg.data.imgs_per_gpu} in this experiments') cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner[ 'type'] data_loaders = [ build_dataloader( ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, # `num_gpus` will be ignored if distributed num_gpus=len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, runner_type=runner_type, persistent_workers=cfg.data.get('persistent_workers', False)) for ds in dataset ] # put model on gpus if distributed: find_unused_parameters = cfg.get('find_unused_parameters', False) # Sets the `find_unused_parameters` parameter in # torch.nn.parallel.DistributedDataParallel model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters) else: model = MMDataParallel( model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) # build runner optimizer = build_optimizer(model, cfg.optimizer) if 'runner' not in cfg: cfg.runner = { 'type': 'EpochBasedRunner', 'max_epochs': cfg.total_epochs } warnings.warn( 'config is now expected to have a `runner` section, ' 'please set `runner` in your config.', UserWarning) else: if 'total_epochs' in cfg: assert cfg.total_epochs == cfg.runner.max_epochs runner = build_runner( cfg.runner, default_args=dict( model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta)) # an ugly workaround to make .log and .log.json filenames the same runner.timestamp = timestamp # fp16 setting fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: optimizer_config = Fp16OptimizerHook( **cfg.optimizer_config, **fp16_cfg, distributed=distributed) elif distributed and 'type' not in cfg.optimizer_config: optimizer_config = OptimizerHook(**cfg.optimizer_config) else: optimizer_config = cfg.optimizer_config # register hooks runner.register_training_hooks( cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None), custom_hooks_config=cfg.get('custom_hooks', None)) if distributed: if isinstance(runner, EpochBasedRunner): runner.register_hook(DistSamplerSeedHook()) # register eval hooks if validate: # Support batch_size > 1 in validation val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1) if val_samples_per_gpu > 1: # Replace 'ImageToTensor' to 'DefaultFormatBundle' cfg.data.val.pipeline = replace_ImageToTensor( cfg.data.val.pipeline) val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) val_dataloader = build_dataloader( val_dataset, samples_per_gpu=val_samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) eval_cfg = cfg.get('evaluation', {}) eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' eval_hook = DistEvalHook if distributed else EvalHook # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'. runner.register_hook( eval_hook(val_dataloader, **eval_cfg), priority='LOW') resume_from = None if cfg.resume_from is None and cfg.get('auto_resume'): resume_from = find_latest_checkpoint(cfg.work_dir) if resume_from is not None: cfg.resume_from = resume_from if cfg.resume_from: runner.resume(cfg.resume_from) elif cfg.load_from: runner.load_checkpoint(cfg.load_from) runner.run(data_loaders, cfg.workflow)
7,445
34.457143
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .anchor import * # noqa: F401, F403 from .bbox import * # noqa: F401, F403 from .data_structures import * # noqa: F401, F403 from .evaluation import * # noqa: F401, F403 from .hook import * # noqa: F401, F403 from .mask import * # noqa: F401, F403 from .post_processing import * # noqa: F401, F403 from .utils import * # noqa: F401, F403
399
39
50
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/evaluation/class_names.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv def wider_face_classes(): return ['face'] def voc_classes(): return [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] def imagenet_det_classes(): return [ 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', 'whale', 'wine_bottle', 'zebra' ] def imagenet_vid_classes(): return [ 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', 'watercraft', 'whale', 'zebra' ] def coco_classes(): return [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush' ] def cityscapes_classes(): return [ 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle' ] dataset_aliases = { 'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'], 'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'], 'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'], 'coco': ['coco', 'mscoco', 'ms_coco'], 'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'], 'cityscapes': ['cityscapes'] } def get_classes(dataset): """Get class names of a dataset.""" alias2name = {} for name, aliases in dataset_aliases.items(): for alias in aliases: alias2name[alias] = name if mmcv.is_str(dataset): if dataset in alias2name: labels = eval(alias2name[dataset] + '_classes()') else: raise ValueError(f'Unrecognized dataset: {dataset}') else: raise TypeError(f'dataset must a str, but got {type(dataset)}') return labels
5,474
45.398305
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/evaluation/recall.py
# Copyright (c) OpenMMLab. All rights reserved. from collections.abc import Sequence import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from .bbox_overlaps import bbox_overlaps def _recalls(all_ious, proposal_nums, thrs): img_num = all_ious.shape[0] total_gt_num = sum([ious.shape[0] for ious in all_ious]) _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32) for k, proposal_num in enumerate(proposal_nums): tmp_ious = np.zeros(0) for i in range(img_num): ious = all_ious[i][:, :proposal_num].copy() gt_ious = np.zeros((ious.shape[0])) if ious.size == 0: tmp_ious = np.hstack((tmp_ious, gt_ious)) continue for j in range(ious.shape[0]): gt_max_overlaps = ious.argmax(axis=1) max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps] gt_idx = max_ious.argmax() gt_ious[j] = max_ious[gt_idx] box_idx = gt_max_overlaps[gt_idx] ious[gt_idx, :] = -1 ious[:, box_idx] = -1 tmp_ious = np.hstack((tmp_ious, gt_ious)) _ious[k, :] = tmp_ious _ious = np.fliplr(np.sort(_ious, axis=1)) recalls = np.zeros((proposal_nums.size, thrs.size)) for i, thr in enumerate(thrs): recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num) return recalls def set_recall_param(proposal_nums, iou_thrs): """Check proposal_nums and iou_thrs and set correct format.""" if isinstance(proposal_nums, Sequence): _proposal_nums = np.array(proposal_nums) elif isinstance(proposal_nums, int): _proposal_nums = np.array([proposal_nums]) else: _proposal_nums = proposal_nums if iou_thrs is None: _iou_thrs = np.array([0.5]) elif isinstance(iou_thrs, Sequence): _iou_thrs = np.array(iou_thrs) elif isinstance(iou_thrs, float): _iou_thrs = np.array([iou_thrs]) else: _iou_thrs = iou_thrs return _proposal_nums, _iou_thrs def eval_recalls(gts, proposals, proposal_nums=None, iou_thrs=0.5, logger=None, use_legacy_coordinate=False): """Calculate recalls. Args: gts (list[ndarray]): a list of arrays of shape (n, 4) proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5) proposal_nums (int | Sequence[int]): Top N proposals to be evaluated. iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5. logger (logging.Logger | str | None): The way to print the recall summary. See `mmcv.utils.print_log()` for details. Default: None. use_legacy_coordinate (bool): Whether use coordinate system in mmdet v1.x. "1" was added to both height and width which means w, h should be computed as 'x2 - x1 + 1` and 'y2 - y1 + 1'. Default: False. Returns: ndarray: recalls of different ious and proposal nums """ img_num = len(gts) assert img_num == len(proposals) proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs) all_ious = [] for i in range(img_num): if proposals[i].ndim == 2 and proposals[i].shape[1] == 5: scores = proposals[i][:, 4] sort_idx = np.argsort(scores)[::-1] img_proposal = proposals[i][sort_idx, :] else: img_proposal = proposals[i] prop_num = min(img_proposal.shape[0], proposal_nums[-1]) if gts[i] is None or gts[i].shape[0] == 0: ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32) else: ious = bbox_overlaps( gts[i], img_proposal[:prop_num, :4], use_legacy_coordinate=use_legacy_coordinate) all_ious.append(ious) all_ious = np.array(all_ious) recalls = _recalls(all_ious, proposal_nums, iou_thrs) print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger) return recalls def print_recall_summary(recalls, proposal_nums, iou_thrs, row_idxs=None, col_idxs=None, logger=None): """Print recalls in a table. Args: recalls (ndarray): calculated from `bbox_recalls` proposal_nums (ndarray or list): top N proposals iou_thrs (ndarray or list): iou thresholds row_idxs (ndarray): which rows(proposal nums) to print col_idxs (ndarray): which cols(iou thresholds) to print logger (logging.Logger | str | None): The way to print the recall summary. See `mmcv.utils.print_log()` for details. Default: None. """ proposal_nums = np.array(proposal_nums, dtype=np.int32) iou_thrs = np.array(iou_thrs) if row_idxs is None: row_idxs = np.arange(proposal_nums.size) if col_idxs is None: col_idxs = np.arange(iou_thrs.size) row_header = [''] + iou_thrs[col_idxs].tolist() table_data = [row_header] for i, num in enumerate(proposal_nums[row_idxs]): row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()] row.insert(0, num) table_data.append(row) table = AsciiTable(table_data) print_log('\n' + table.table, logger=logger) def plot_num_recall(recalls, proposal_nums): """Plot Proposal_num-Recalls curve. Args: recalls(ndarray or list): shape (k,) proposal_nums(ndarray or list): same shape as `recalls` """ if isinstance(proposal_nums, np.ndarray): _proposal_nums = proposal_nums.tolist() else: _proposal_nums = proposal_nums if isinstance(recalls, np.ndarray): _recalls = recalls.tolist() else: _recalls = recalls import matplotlib.pyplot as plt f = plt.figure() plt.plot([0] + _proposal_nums, [0] + _recalls) plt.xlabel('Proposal num') plt.ylabel('Recall') plt.axis([0, proposal_nums.max(), 0, 1]) f.show() def plot_iou_recall(recalls, iou_thrs): """Plot IoU-Recalls curve. Args: recalls(ndarray or list): shape (k,) iou_thrs(ndarray or list): same shape as `recalls` """ if isinstance(iou_thrs, np.ndarray): _iou_thrs = iou_thrs.tolist() else: _iou_thrs = iou_thrs if isinstance(recalls, np.ndarray): _recalls = recalls.tolist() else: _recalls = recalls import matplotlib.pyplot as plt f = plt.figure() plt.plot(_iou_thrs + [1.0], _recalls + [0.]) plt.xlabel('IoU') plt.ylabel('Recall') plt.axis([iou_thrs.min(), 1, 0, 1]) f.show()
6,806
33.378788
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/evaluation/eval_hooks.py
# Copyright (c) OpenMMLab. All rights reserved. import bisect import os.path as osp import mmcv import torch.distributed as dist from mmcv.runner import DistEvalHook as BaseDistEvalHook from mmcv.runner import EvalHook as BaseEvalHook from torch.nn.modules.batchnorm import _BatchNorm def _calc_dynamic_intervals(start_interval, dynamic_interval_list): assert mmcv.is_list_of(dynamic_interval_list, tuple) dynamic_milestones = [0] dynamic_milestones.extend( [dynamic_interval[0] for dynamic_interval in dynamic_interval_list]) dynamic_intervals = [start_interval] dynamic_intervals.extend( [dynamic_interval[1] for dynamic_interval in dynamic_interval_list]) return dynamic_milestones, dynamic_intervals class EvalHook(BaseEvalHook): def __init__(self, *args, dynamic_intervals=None, **kwargs): super(EvalHook, self).__init__(*args, **kwargs) self.use_dynamic_intervals = dynamic_intervals is not None if self.use_dynamic_intervals: self.dynamic_milestones, self.dynamic_intervals = \ _calc_dynamic_intervals(self.interval, dynamic_intervals) def _decide_interval(self, runner): if self.use_dynamic_intervals: progress = runner.epoch if self.by_epoch else runner.iter step = bisect.bisect(self.dynamic_milestones, (progress + 1)) # Dynamically modify the evaluation interval self.interval = self.dynamic_intervals[step - 1] def before_train_epoch(self, runner): """Evaluate the model only at the start of training by epoch.""" self._decide_interval(runner) super().before_train_epoch(runner) def before_train_iter(self, runner): self._decide_interval(runner) super().before_train_iter(runner) def _do_evaluate(self, runner): """perform evaluation and save ckpt.""" if not self._should_evaluate(runner): return from mmdet.apis import single_gpu_test results = single_gpu_test(runner.model, self.dataloader, show=False) runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) key_score = self.evaluate(runner, results) if self.save_best: self._save_ckpt(runner, key_score) # Note: Considering that MMCV's EvalHook updated its interface in V1.3.16, # in order to avoid strong version dependency, we did not directly # inherit EvalHook but BaseDistEvalHook. class DistEvalHook(BaseDistEvalHook): def __init__(self, *args, dynamic_intervals=None, **kwargs): super(DistEvalHook, self).__init__(*args, **kwargs) self.use_dynamic_intervals = dynamic_intervals is not None if self.use_dynamic_intervals: self.dynamic_milestones, self.dynamic_intervals = \ _calc_dynamic_intervals(self.interval, dynamic_intervals) def _decide_interval(self, runner): if self.use_dynamic_intervals: progress = runner.epoch if self.by_epoch else runner.iter step = bisect.bisect(self.dynamic_milestones, (progress + 1)) # Dynamically modify the evaluation interval self.interval = self.dynamic_intervals[step - 1] def before_train_epoch(self, runner): """Evaluate the model only at the start of training by epoch.""" self._decide_interval(runner) super().before_train_epoch(runner) def before_train_iter(self, runner): self._decide_interval(runner) super().before_train_iter(runner) def _do_evaluate(self, runner): """perform evaluation and save ckpt.""" # Synchronization of BatchNorm's buffer (running_mean # and running_var) is not supported in the DDP of pytorch, # which may cause the inconsistent performance of models in # different ranks, so we broadcast BatchNorm's buffers # of rank 0 to other ranks to avoid this. if self.broadcast_bn_buffer: model = runner.model for name, module in model.named_modules(): if isinstance(module, _BatchNorm) and module.track_running_stats: dist.broadcast(module.running_var, 0) dist.broadcast(module.running_mean, 0) if not self._should_evaluate(runner): return tmpdir = self.tmpdir if tmpdir is None: tmpdir = osp.join(runner.work_dir, '.eval_hook') from mmdet.apis import multi_gpu_test results = multi_gpu_test( runner.model, self.dataloader, tmpdir=tmpdir, gpu_collect=self.gpu_collect) if runner.rank == 0: print('\n') runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) key_score = self.evaluate(runner, results) if self.save_best: self._save_ckpt(runner, key_score)
4,942
37.92126
76
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/evaluation/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, get_classes, imagenet_det_classes, imagenet_vid_classes, voc_classes) from .eval_hooks import DistEvalHook, EvalHook from .mean_ap import average_precision, eval_map, print_map_summary from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, print_recall_summary) __all__ = [ 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls', 'print_recall_summary', 'plot_num_recall', 'plot_iou_recall' ]
803
46.294118
76
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/evaluation/bbox_overlaps.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6, use_legacy_coordinate=False): """Calculate the ious between each bbox of bboxes1 and bboxes2. Args: bboxes1 (ndarray): Shape (n, 4) bboxes2 (ndarray): Shape (k, 4) mode (str): IOU (intersection over union) or IOF (intersection over foreground) use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Note when function is used in `VOCDataset`, it should be True to align with the official implementation `http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar` Default: False. Returns: ious (ndarray): Shape (n, k) """ assert mode in ['iou', 'iof'] if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. bboxes1 = bboxes1.astype(np.float32) bboxes2 = bboxes2.astype(np.float32) rows = bboxes1.shape[0] cols = bboxes2.shape[0] ious = np.zeros((rows, cols), dtype=np.float32) if rows * cols == 0: return ious exchange = False if bboxes1.shape[0] > bboxes2.shape[0]: bboxes1, bboxes2 = bboxes2, bboxes1 ious = np.zeros((cols, rows), dtype=np.float32) exchange = True area1 = (bboxes1[:, 2] - bboxes1[:, 0] + extra_length) * ( bboxes1[:, 3] - bboxes1[:, 1] + extra_length) area2 = (bboxes2[:, 2] - bboxes2[:, 0] + extra_length) * ( bboxes2[:, 3] - bboxes2[:, 1] + extra_length) for i in range(bboxes1.shape[0]): x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) overlap = np.maximum(x_end - x_start + extra_length, 0) * np.maximum( y_end - y_start + extra_length, 0) if mode == 'iou': union = area1[i] + area2 - overlap else: union = area1[i] if not exchange else area2 union = np.maximum(union, eps) ious[i, :] = overlap / union if exchange: ious = ious.T return ious
2,454
36.19697
86
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/evaluation/mean_ap.py
# Copyright (c) OpenMMLab. All rights reserved. from multiprocessing import Pool import mmcv import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from .bbox_overlaps import bbox_overlaps from .class_names import get_classes def average_precision(recalls, precisions, mode='area'): """Calculate average precision (for single or multiple scales). Args: recalls (ndarray): shape (num_scales, num_dets) or (num_dets, ) precisions (ndarray): shape (num_scales, num_dets) or (num_dets, ) mode (str): 'area' or '11points', 'area' means calculating the area under precision-recall curve, '11points' means calculating the average precision of recalls at [0, 0.1, ..., 1] Returns: float or ndarray: calculated average precision """ no_scale = False if recalls.ndim == 1: no_scale = True recalls = recalls[np.newaxis, :] precisions = precisions[np.newaxis, :] assert recalls.shape == precisions.shape and recalls.ndim == 2 num_scales = recalls.shape[0] ap = np.zeros(num_scales, dtype=np.float32) if mode == 'area': zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) ones = np.ones((num_scales, 1), dtype=recalls.dtype) mrec = np.hstack((zeros, recalls, ones)) mpre = np.hstack((zeros, precisions, zeros)) for i in range(mpre.shape[1] - 1, 0, -1): mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) for i in range(num_scales): ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] ap[i] = np.sum( (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) elif mode == '11points': for i in range(num_scales): for thr in np.arange(0, 1 + 1e-3, 0.1): precs = precisions[i, recalls[i, :] >= thr] prec = precs.max() if precs.size > 0 else 0 ap[i] += prec ap /= 11 else: raise ValueError( 'Unrecognized mode, only "area" and "11points" are supported') if no_scale: ap = ap[0] return ap def tpfp_imagenet(det_bboxes, gt_bboxes, gt_bboxes_ignore=None, default_iou_thr=0.5, area_ranges=None, use_legacy_coordinate=False): """Check if detected bboxes are true positive or false positive. Args: det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, of shape (k, 4). Default: None default_iou_thr (float): IoU threshold to be considered as matched for medium and large bboxes (small ones have special rules). Default: 0.5. area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, in the format [(min1, max1), (min2, max2), ...]. Default: None. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. Returns: tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of each array is (num_scales, m). """ if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. # an indicator of ignored gts gt_ignore_inds = np.concatenate( (np.zeros(gt_bboxes.shape[0], dtype=np.bool), np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) # stack gt_bboxes and gt_bboxes_ignore for convenience gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) num_dets = det_bboxes.shape[0] num_gts = gt_bboxes.shape[0] if area_ranges is None: area_ranges = [(None, None)] num_scales = len(area_ranges) # tp and fp are of shape (num_scales, num_gts), each row is tp or fp # of a certain scale. tp = np.zeros((num_scales, num_dets), dtype=np.float32) fp = np.zeros((num_scales, num_dets), dtype=np.float32) if gt_bboxes.shape[0] == 0: if area_ranges == [(None, None)]: fp[...] = 1 else: det_areas = ( det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) for i, (min_area, max_area) in enumerate(area_ranges): fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 return tp, fp ious = bbox_overlaps( det_bboxes, gt_bboxes - 1, use_legacy_coordinate=use_legacy_coordinate) gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)), default_iou_thr) # sort all detections by scores in descending order sort_inds = np.argsort(-det_bboxes[:, -1]) for k, (min_area, max_area) in enumerate(area_ranges): gt_covered = np.zeros(num_gts, dtype=bool) # if no area range is specified, gt_area_ignore is all False if min_area is None: gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) else: gt_areas = gt_w * gt_h gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) for i in sort_inds: max_iou = -1 matched_gt = -1 # find best overlapped available gt for j in range(num_gts): # different from PASCAL VOC: allow finding other gts if the # best overlapped ones are already matched by other det bboxes if gt_covered[j]: continue elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou: max_iou = ious[i, j] matched_gt = j # there are 4 cases for a det bbox: # 1. it matches a gt, tp = 1, fp = 0 # 2. it matches an ignored gt, tp = 0, fp = 0 # 3. it matches no gt and within area range, tp = 0, fp = 1 # 4. it matches no gt but is beyond area range, tp = 0, fp = 0 if matched_gt >= 0: gt_covered[matched_gt] = 1 if not (gt_ignore_inds[matched_gt] or gt_area_ignore[matched_gt]): tp[k, i] = 1 elif min_area is None: fp[k, i] = 1 else: bbox = det_bboxes[i, :4] area = (bbox[2] - bbox[0] + extra_length) * ( bbox[3] - bbox[1] + extra_length) if area >= min_area and area < max_area: fp[k, i] = 1 return tp, fp def tpfp_default(det_bboxes, gt_bboxes, gt_bboxes_ignore=None, iou_thr=0.5, area_ranges=None, use_legacy_coordinate=False): """Check if detected bboxes are true positive or false positive. Args: det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, of shape (k, 4). Default: None iou_thr (float): IoU threshold to be considered as matched. Default: 0.5. area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, in the format [(min1, max1), (min2, max2), ...]. Default: None. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. Returns: tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of each array is (num_scales, m). """ if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. # an indicator of ignored gts gt_ignore_inds = np.concatenate( (np.zeros(gt_bboxes.shape[0], dtype=np.bool), np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) # stack gt_bboxes and gt_bboxes_ignore for convenience gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) num_dets = det_bboxes.shape[0] num_gts = gt_bboxes.shape[0] if area_ranges is None: area_ranges = [(None, None)] num_scales = len(area_ranges) # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of # a certain scale tp = np.zeros((num_scales, num_dets), dtype=np.float32) fp = np.zeros((num_scales, num_dets), dtype=np.float32) # if there is no gt bboxes in this image, then all det bboxes # within area range are false positives if gt_bboxes.shape[0] == 0: if area_ranges == [(None, None)]: fp[...] = 1 else: det_areas = ( det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) for i, (min_area, max_area) in enumerate(area_ranges): fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 return tp, fp ious = bbox_overlaps( det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate) # for each det, the max iou with all gts ious_max = ious.max(axis=1) # for each det, which gt overlaps most with it ious_argmax = ious.argmax(axis=1) # sort all dets in descending order by scores sort_inds = np.argsort(-det_bboxes[:, -1]) for k, (min_area, max_area) in enumerate(area_ranges): gt_covered = np.zeros(num_gts, dtype=bool) # if no area range is specified, gt_area_ignore is all False if min_area is None: gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) else: gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * ( gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length) gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) for i in sort_inds: if ious_max[i] >= iou_thr: matched_gt = ious_argmax[i] if not (gt_ignore_inds[matched_gt] or gt_area_ignore[matched_gt]): if not gt_covered[matched_gt]: gt_covered[matched_gt] = True tp[k, i] = 1 else: fp[k, i] = 1 # otherwise ignore this detected bbox, tp = 0, fp = 0 elif min_area is None: fp[k, i] = 1 else: bbox = det_bboxes[i, :4] area = (bbox[2] - bbox[0] + extra_length) * ( bbox[3] - bbox[1] + extra_length) if area >= min_area and area < max_area: fp[k, i] = 1 return tp, fp def get_cls_results(det_results, annotations, class_id): """Get det results and gt information of a certain class. Args: det_results (list[list]): Same as `eval_map()`. annotations (list[dict]): Same as `eval_map()`. class_id (int): ID of a specific class. Returns: tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes """ cls_dets = [img_res[class_id] for img_res in det_results] cls_gts = [] cls_gts_ignore = [] for ann in annotations: gt_inds = ann['labels'] == class_id cls_gts.append(ann['bboxes'][gt_inds, :]) if ann.get('labels_ignore', None) is not None: ignore_inds = ann['labels_ignore'] == class_id cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :]) else: cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32)) return cls_dets, cls_gts, cls_gts_ignore def eval_map(det_results, annotations, scale_ranges=None, iou_thr=0.5, dataset=None, logger=None, tpfp_fn=None, nproc=4, use_legacy_coordinate=False): """Evaluate mAP of a dataset. Args: det_results (list[list]): [[cls1_det, cls2_det, ...], ...]. The outer list indicates images, and the inner list indicates per-class detected bboxes. annotations (list[dict]): Ground truth annotations where each item of the list indicates an image. Keys of annotations are: - `bboxes`: numpy array of shape (n, 4) - `labels`: numpy array of shape (n, ) - `bboxes_ignore` (optional): numpy array of shape (k, 4) - `labels_ignore` (optional): numpy array of shape (k, ) scale_ranges (list[tuple] | None): Range of scales to be evaluated, in the format [(min1, max1), (min2, max2), ...]. A range of (32, 64) means the area range between (32**2, 64**2). Default: None. iou_thr (float): IoU threshold to be considered as matched. Default: 0.5. dataset (list[str] | str | None): Dataset name or dataset classes, there are minor differences in metrics for different datasets, e.g. "voc07", "imagenet_det", etc. Default: None. logger (logging.Logger | str | None): The way to print the mAP summary. See `mmcv.utils.print_log()` for details. Default: None. tpfp_fn (callable | None): The function used to determine true/ false positives. If None, :func:`tpfp_default` is used as default unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this case). If it is given as a function, then this function is used to evaluate tp & fp. Default None. nproc (int): Processes used for computing TP and FP. Default: 4. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. Returns: tuple: (mAP, [dict, dict, ...]) """ assert len(det_results) == len(annotations) if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. num_imgs = len(det_results) num_scales = len(scale_ranges) if scale_ranges is not None else 1 num_classes = len(det_results[0]) # positive class num area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges] if scale_ranges is not None else None) pool = Pool(nproc) eval_results = [] for i in range(num_classes): # get gt and det bboxes of this class cls_dets, cls_gts, cls_gts_ignore = get_cls_results( det_results, annotations, i) # choose proper function according to datasets to compute tp and fp if tpfp_fn is None: if dataset in ['det', 'vid']: tpfp_fn = tpfp_imagenet else: tpfp_fn = tpfp_default if not callable(tpfp_fn): raise ValueError( f'tpfp_fn has to be a function or None, but got {tpfp_fn}') # compute tp and fp for each image with multiple processes tpfp = pool.starmap( tpfp_fn, zip(cls_dets, cls_gts, cls_gts_ignore, [iou_thr for _ in range(num_imgs)], [area_ranges for _ in range(num_imgs)], [use_legacy_coordinate for _ in range(num_imgs)])) tp, fp = tuple(zip(*tpfp)) # calculate gt number of each scale # ignored gts or gts beyond the specific scale are not counted num_gts = np.zeros(num_scales, dtype=int) for j, bbox in enumerate(cls_gts): if area_ranges is None: num_gts[0] += bbox.shape[0] else: gt_areas = (bbox[:, 2] - bbox[:, 0] + extra_length) * ( bbox[:, 3] - bbox[:, 1] + extra_length) for k, (min_area, max_area) in enumerate(area_ranges): num_gts[k] += np.sum((gt_areas >= min_area) & (gt_areas < max_area)) # sort all det bboxes by score, also sort tp and fp cls_dets = np.vstack(cls_dets) num_dets = cls_dets.shape[0] sort_inds = np.argsort(-cls_dets[:, -1]) tp = np.hstack(tp)[:, sort_inds] fp = np.hstack(fp)[:, sort_inds] # calculate recall and precision with tp and fp tp = np.cumsum(tp, axis=1) fp = np.cumsum(fp, axis=1) eps = np.finfo(np.float32).eps recalls = tp / np.maximum(num_gts[:, np.newaxis], eps) precisions = tp / np.maximum((tp + fp), eps) # calculate AP if scale_ranges is None: recalls = recalls[0, :] precisions = precisions[0, :] num_gts = num_gts.item() mode = 'area' if dataset != 'voc07' else '11points' ap = average_precision(recalls, precisions, mode) eval_results.append({ 'num_gts': num_gts, 'num_dets': num_dets, 'recall': recalls, 'precision': precisions, 'ap': ap }) pool.close() if scale_ranges is not None: # shape (num_classes, num_scales) all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results]) all_num_gts = np.vstack( [cls_result['num_gts'] for cls_result in eval_results]) mean_ap = [] for i in range(num_scales): if np.any(all_num_gts[:, i] > 0): mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean()) else: mean_ap.append(0.0) else: aps = [] for cls_result in eval_results: if cls_result['num_gts'] > 0: aps.append(cls_result['ap']) mean_ap = np.array(aps).mean().item() if aps else 0.0 print_map_summary( mean_ap, eval_results, dataset, area_ranges, logger=logger) return mean_ap, eval_results def print_map_summary(mean_ap, results, dataset=None, scale_ranges=None, logger=None): """Print mAP and results of each class. A table will be printed to show the gts/dets/recall/AP of each class and the mAP. Args: mean_ap (float): Calculated from `eval_map()`. results (list[dict]): Calculated from `eval_map()`. dataset (list[str] | str | None): Dataset name or dataset classes. scale_ranges (list[tuple] | None): Range of scales to be evaluated. logger (logging.Logger | str | None): The way to print the mAP summary. See `mmcv.utils.print_log()` for details. Default: None. """ if logger == 'silent': return if isinstance(results[0]['ap'], np.ndarray): num_scales = len(results[0]['ap']) else: num_scales = 1 if scale_ranges is not None: assert len(scale_ranges) == num_scales num_classes = len(results) recalls = np.zeros((num_scales, num_classes), dtype=np.float32) aps = np.zeros((num_scales, num_classes), dtype=np.float32) num_gts = np.zeros((num_scales, num_classes), dtype=int) for i, cls_result in enumerate(results): if cls_result['recall'].size > 0: recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] aps[:, i] = cls_result['ap'] num_gts[:, i] = cls_result['num_gts'] if dataset is None: label_names = [str(i) for i in range(num_classes)] elif mmcv.is_str(dataset): label_names = get_classes(dataset) else: label_names = dataset if not isinstance(mean_ap, list): mean_ap = [mean_ap] header = ['class', 'gts', 'dets', 'recall', 'ap'] for i in range(num_scales): if scale_ranges is not None: print_log(f'Scale range {scale_ranges[i]}', logger=logger) table_data = [header] for j in range(num_classes): row_data = [ label_names[j], num_gts[i, j], results[j]['num_dets'], f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}' ] table_data.append(row_data) table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}']) table = AsciiTable(table_data) table.inner_footing_row_border = True print_log('\n' + table.table, logger=logger)
20,724
39.637255
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/post_processing/merge_augs.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import warnings import numpy as np import torch from mmcv import ConfigDict from mmcv.ops import nms from ..bbox import bbox_mapping_back def merge_aug_proposals(aug_proposals, img_metas, cfg): """Merge augmented proposals (multiscale, flip, etc.) Args: aug_proposals (list[Tensor]): proposals from different testing schemes, shape (n, 5). Note that they are not rescaled to the original image size. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. cfg (dict): rpn test config. Returns: Tensor: shape (n, 4), proposals corresponding to original image scale. """ cfg = copy.deepcopy(cfg) # deprecate arguments warning if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: warnings.warn( 'In rpn_proposal or test_cfg, ' 'nms_thr has been moved to a dict named nms as ' 'iou_threshold, max_num has been renamed as max_per_img, ' 'name of original arguments and the way to specify ' 'iou_threshold of NMS will be deprecated.') if 'nms' not in cfg: cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) if 'max_num' in cfg: if 'max_per_img' in cfg: assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \ f'max_per_img at the same time, but get {cfg.max_num} ' \ f'and {cfg.max_per_img} respectively' \ f'Please delete max_num which will be deprecated.' else: cfg.max_per_img = cfg.max_num if 'nms_thr' in cfg: assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ f'iou_threshold in nms and ' \ f'nms_thr at the same time, but get ' \ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ f' respectively. Please delete the nms_thr ' \ f'which will be deprecated.' recovered_proposals = [] for proposals, img_info in zip(aug_proposals, img_metas): img_shape = img_info['img_shape'] scale_factor = img_info['scale_factor'] flip = img_info['flip'] flip_direction = img_info['flip_direction'] _proposals = proposals.clone() _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, scale_factor, flip, flip_direction) recovered_proposals.append(_proposals) aug_proposals = torch.cat(recovered_proposals, dim=0) merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(), aug_proposals[:, -1].contiguous(), cfg.nms.iou_threshold) scores = merged_proposals[:, 4] _, order = scores.sort(0, descending=True) num = min(cfg.max_per_img, merged_proposals.shape[0]) order = order[:num] merged_proposals = merged_proposals[order, :] return merged_proposals def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_shapes (list[Tensor]): shape (3, ). rcnn_test_cfg (dict): rcnn test config. Returns: tuple: (bboxes, scores) """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] flip_direction = img_info[0]['flip_direction'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.stack(recovered_bboxes).mean(dim=0) if aug_scores is None: return bboxes else: scores = torch.stack(aug_scores).mean(dim=0) return bboxes, scores def merge_aug_scores(aug_scores): """Merge augmented bbox scores.""" if isinstance(aug_scores[0], torch.Tensor): return torch.mean(torch.stack(aug_scores), dim=0) else: return np.mean(aug_scores, axis=0) def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None): """Merge augmented mask prediction. Args: aug_masks (list[ndarray]): shape (n, #class, h, w) img_shapes (list[ndarray]): shape (3, ). rcnn_test_cfg (dict): rcnn test config. Returns: tuple: (bboxes, scores) """ recovered_masks = [] for mask, img_info in zip(aug_masks, img_metas): flip = img_info[0]['flip'] if flip: flip_direction = img_info[0]['flip_direction'] if flip_direction == 'horizontal': mask = mask[:, :, :, ::-1] elif flip_direction == 'vertical': mask = mask[:, :, ::-1, :] elif flip_direction == 'diagonal': mask = mask[:, :, :, ::-1] mask = mask[:, :, ::-1, :] else: raise ValueError( f"Invalid flipping direction '{flip_direction}'") recovered_masks.append(mask) if weights is None: merged_masks = np.mean(recovered_masks, axis=0) else: merged_masks = np.average( np.array(recovered_masks), axis=0, weights=np.array(weights)) return merged_masks
5,790
36.36129
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/post_processing/bbox_nms.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.ops.nms import batched_nms from mmdet.core.bbox.iou_calculators import bbox_overlaps def multiclass_nms(multi_bboxes, multi_scores, score_thr, nms_cfg, max_num=-1, score_factors=None, return_inds=False): """NMS for multi-class bboxes. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class), where the last column contains scores of the background class, but this will be ignored. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. nms_thr (float): NMS IoU threshold max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. score_factors (Tensor, optional): The factors multiplied to scores before applying NMS. Default to None. return_inds (bool, optional): Whether return the indices of kept bboxes. Default to False. Returns: tuple: (dets, labels, indices (optional)), tensors of shape (k, 5), (k), and (k). Dets are boxes with scores. Labels are 0-based. """ num_classes = multi_scores.size(1) - 1 # exclude background category if multi_bboxes.shape[1] > 4: bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4) else: bboxes = multi_bboxes[:, None].expand( multi_scores.size(0), num_classes, 4) scores = multi_scores[:, :-1] labels = torch.arange(num_classes, dtype=torch.long, device=scores.device) labels = labels.view(1, -1).expand_as(scores) bboxes = bboxes.reshape(-1, 4) scores = scores.reshape(-1) labels = labels.reshape(-1) if not torch.onnx.is_in_onnx_export(): # NonZero not supported in TensorRT # remove low scoring boxes valid_mask = scores > score_thr # multiply score_factor after threshold to preserve more bboxes, improve # mAP by 1% for YOLOv3 if score_factors is not None: # expand the shape to match original shape of score score_factors = score_factors.view(-1, 1).expand( multi_scores.size(0), num_classes) score_factors = score_factors.reshape(-1) scores = scores * score_factors if not torch.onnx.is_in_onnx_export(): # NonZero not supported in TensorRT inds = valid_mask.nonzero(as_tuple=False).squeeze(1) bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds] else: # TensorRT NMS plugin has invalid output filled with -1 # add dummy data to make detection output correct. bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0) scores = torch.cat([scores, scores.new_zeros(1)], dim=0) labels = torch.cat([labels, labels.new_zeros(1)], dim=0) if bboxes.numel() == 0: if torch.onnx.is_in_onnx_export(): raise RuntimeError('[ONNX Error] Can not record NMS ' 'as it has not been executed this time') dets = torch.cat([bboxes, scores[:, None]], -1) if return_inds: return dets, labels, inds else: return dets, labels dets, keep = batched_nms(bboxes, scores, labels, nms_cfg) if max_num > 0: dets = dets[:max_num] keep = keep[:max_num] if return_inds: return dets, labels[keep], inds[keep] else: return dets, labels[keep] def fast_nms(multi_bboxes, multi_scores, multi_coeffs, score_thr, iou_thr, top_k, max_num=-1): """Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_. Fast NMS allows already-removed detections to suppress other detections so that every instance can be decided to be kept or discarded in parallel, which is not possible in traditional NMS. This relaxation allows us to implement Fast NMS entirely in standard GPU-accelerated matrix operations. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class+1), where the last column contains scores of the background class, but this will be ignored. multi_coeffs (Tensor): shape (n, #class*coeffs_dim). score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_thr (float): IoU threshold to be considered as conflicted. top_k (int): if there are more than top_k bboxes before NMS, only top top_k will be kept. max_num (int): if there are more than max_num bboxes after NMS, only top max_num will be kept. If -1, keep all the bboxes. Default: -1. Returns: tuple: (dets, labels, coefficients), tensors of shape (k, 5), (k, 1), and (k, coeffs_dim). Dets are boxes with scores. Labels are 0-based. """ scores = multi_scores[:, :-1].t() # [#class, n] scores, idx = scores.sort(1, descending=True) idx = idx[:, :top_k].contiguous() scores = scores[:, :top_k] # [#class, topk] num_classes, num_dets = idx.size() boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4) coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1) iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk] iou.triu_(diagonal=1) iou_max, _ = iou.max(dim=1) # Now just filter out the ones higher than the threshold keep = iou_max <= iou_thr # Second thresholding introduces 0.2 mAP gain at negligible time cost keep *= scores > score_thr # Assign each kept detection to its corresponding class classes = torch.arange( num_classes, device=boxes.device)[:, None].expand_as(keep) classes = classes[keep] boxes = boxes[keep] coeffs = coeffs[keep] scores = scores[keep] # Only keep the top max_num highest scores across all classes scores, idx = scores.sort(0, descending=True) if max_num > 0: idx = idx[:max_num] scores = scores[:max_num] classes = classes[idx] boxes = boxes[idx] coeffs = coeffs[idx] cls_dets = torch.cat([boxes, scores[:, None]], dim=1) return cls_dets, classes, coeffs
6,461
36.569767
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/post_processing/matrix_nms.py
# Copyright (c) OpenMMLab. All rights reserved. import torch def mask_matrix_nms(masks, labels, scores, filter_thr=-1, nms_pre=-1, max_num=-1, kernel='gaussian', sigma=2.0, mask_area=None): """Matrix NMS for multi-class masks. Args: masks (Tensor): Has shape (num_instances, h, w) labels (Tensor): Labels of corresponding masks, has shape (num_instances,). scores (Tensor): Mask scores of corresponding masks, has shape (num_instances). filter_thr (float): Score threshold to filter the masks after matrix nms. Default: -1, which means do not use filter_thr. nms_pre (int): The max number of instances to do the matrix nms. Default: -1, which means do not use nms_pre. max_num (int, optional): If there are more than max_num masks after matrix, only top max_num will be kept. Default: -1, which means do not use max_num. kernel (str): 'linear' or 'gaussian'. sigma (float): std in gaussian method. mask_area (Tensor): The sum of seg_masks. Returns: tuple(Tensor): Processed mask results. - scores (Tensor): Updated scores, has shape (n,). - labels (Tensor): Remained labels, has shape (n,). - masks (Tensor): Remained masks, has shape (n, w, h). - keep_inds (Tensor): The indices number of the remaining mask in the input mask, has shape (n,). """ assert len(labels) == len(masks) == len(scores) if len(labels) == 0: return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( 0, *masks.shape[-2:]), labels.new_zeros(0) if mask_area is None: mask_area = masks.sum((1, 2)).float() else: assert len(masks) == len(mask_area) # sort and keep top nms_pre scores, sort_inds = torch.sort(scores, descending=True) keep_inds = sort_inds if nms_pre > 0 and len(sort_inds) > nms_pre: sort_inds = sort_inds[:nms_pre] keep_inds = keep_inds[:nms_pre] scores = scores[:nms_pre] masks = masks[sort_inds] mask_area = mask_area[sort_inds] labels = labels[sort_inds] num_masks = len(labels) flatten_masks = masks.reshape(num_masks, -1).float() # inter. inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0)) expanded_mask_area = mask_area.expand(num_masks, num_masks) # Upper triangle iou matrix. iou_matrix = (inter_matrix / (expanded_mask_area + expanded_mask_area.transpose(1, 0) - inter_matrix)).triu(diagonal=1) # label_specific matrix. expanded_labels = labels.expand(num_masks, num_masks) # Upper triangle label matrix. label_matrix = (expanded_labels == expanded_labels.transpose( 1, 0)).triu(diagonal=1) # IoU compensation compensate_iou, _ = (iou_matrix * label_matrix).max(0) compensate_iou = compensate_iou.expand(num_masks, num_masks).transpose(1, 0) # IoU decay decay_iou = iou_matrix * label_matrix # Calculate the decay_coefficient if kernel == 'gaussian': decay_matrix = torch.exp(-1 * sigma * (decay_iou**2)) compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2)) decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0) elif kernel == 'linear': decay_matrix = (1 - decay_iou) / (1 - compensate_iou) decay_coefficient, _ = decay_matrix.min(0) else: raise NotImplementedError( f'{kernel} kernel is not supported in matrix nms!') # update the score. scores = scores * decay_coefficient if filter_thr > 0: keep = scores >= filter_thr keep_inds = keep_inds[keep] if not keep.any(): return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( 0, *masks.shape[-2:]), labels.new_zeros(0) masks = masks[keep] scores = scores[keep] labels = labels[keep] # sort and keep top max_num scores, sort_inds = torch.sort(scores, descending=True) keep_inds = keep_inds[sort_inds] if max_num > 0 and len(sort_inds) > max_num: sort_inds = sort_inds[:max_num] keep_inds = keep_inds[:max_num] scores = scores[:max_num] masks = masks[sort_inds] labels = labels[sort_inds] return scores, labels, masks, keep_inds
4,622
36.893443
77
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/post_processing/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .bbox_nms import fast_nms, multiclass_nms from .matrix_nms import mask_matrix_nms from .merge_augs import (merge_aug_bboxes, merge_aug_masks, merge_aug_proposals, merge_aug_scores) __all__ = [ 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 'merge_aug_scores', 'merge_aug_masks', 'mask_matrix_nms', 'fast_nms' ]
412
36.545455
72
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/mask/structures.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import cv2 import mmcv import numpy as np import pycocotools.mask as maskUtils import torch from mmcv.ops.roi_align import roi_align class BaseInstanceMasks(metaclass=ABCMeta): """Base class for instance masks.""" @abstractmethod def rescale(self, scale, interpolation='nearest'): """Rescale masks as large as possible while keeping the aspect ratio. For details can refer to `mmcv.imrescale`. Args: scale (tuple[int]): The maximum size (h, w) of rescaled mask. interpolation (str): Same as :func:`mmcv.imrescale`. Returns: BaseInstanceMasks: The rescaled masks. """ @abstractmethod def resize(self, out_shape, interpolation='nearest'): """Resize masks to the given out_shape. Args: out_shape: Target (h, w) of resized mask. interpolation (str): See :func:`mmcv.imresize`. Returns: BaseInstanceMasks: The resized masks. """ @abstractmethod def flip(self, flip_direction='horizontal'): """Flip masks alone the given direction. Args: flip_direction (str): Either 'horizontal' or 'vertical'. Returns: BaseInstanceMasks: The flipped masks. """ @abstractmethod def pad(self, out_shape, pad_val): """Pad masks to the given size of (h, w). Args: out_shape (tuple[int]): Target (h, w) of padded mask. pad_val (int): The padded value. Returns: BaseInstanceMasks: The padded masks. """ @abstractmethod def crop(self, bbox): """Crop each mask by the given bbox. Args: bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ). Return: BaseInstanceMasks: The cropped masks. """ @abstractmethod def crop_and_resize(self, bboxes, out_shape, inds, device, interpolation='bilinear', binarize=True): """Crop and resize masks by the given bboxes. This function is mainly used in mask targets computation. It firstly align mask to bboxes by assigned_inds, then crop mask by the assigned bbox and resize to the size of (mask_h, mask_w) Args: bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4) out_shape (tuple[int]): Target (h, w) of resized mask inds (ndarray): Indexes to assign masks to each bbox, shape (N,) and values should be between [0, num_masks - 1]. device (str): Device of bboxes interpolation (str): See `mmcv.imresize` binarize (bool): if True fractional values are rounded to 0 or 1 after the resize operation. if False and unsupported an error will be raised. Defaults to True. Return: BaseInstanceMasks: the cropped and resized masks. """ @abstractmethod def expand(self, expanded_h, expanded_w, top, left): """see :class:`Expand`.""" @property @abstractmethod def areas(self): """ndarray: areas of each instance.""" @abstractmethod def to_ndarray(self): """Convert masks to the format of ndarray. Return: ndarray: Converted masks in the format of ndarray. """ @abstractmethod def to_tensor(self, dtype, device): """Convert masks to the format of Tensor. Args: dtype (str): Dtype of converted mask. device (torch.device): Device of converted masks. Returns: Tensor: Converted masks in the format of Tensor. """ @abstractmethod def translate(self, out_shape, offset, direction='horizontal', fill_val=0, interpolation='bilinear'): """Translate the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). offset (int | float): The offset for translate. direction (str): The translate direction, either "horizontal" or "vertical". fill_val (int | float): Border value. Default 0. interpolation (str): Same as :func:`mmcv.imtranslate`. Returns: Translated masks. """ def shear(self, out_shape, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """Shear the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). magnitude (int | float): The magnitude used for shear. direction (str): The shear direction, either "horizontal" or "vertical". border_value (int | tuple[int]): Value used in case of a constant border. Default 0. interpolation (str): Same as in :func:`mmcv.imshear`. Returns: ndarray: Sheared masks. """ @abstractmethod def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): """Rotate the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). angle (int | float): Rotation angle in degrees. Positive values mean counter-clockwise rotation. center (tuple[float], optional): Center point (w, h) of the rotation in source image. If not specified, the center of the image will be used. scale (int | float): Isotropic scale factor. fill_val (int | float): Border value. Default 0 for masks. Returns: Rotated masks. """ class BitmapMasks(BaseInstanceMasks): """This class represents masks in the form of bitmaps. Args: masks (ndarray): ndarray of masks in shape (N, H, W), where N is the number of objects. height (int): height of masks width (int): width of masks Example: >>> from mmdet.core.mask.structures import * # NOQA >>> num_masks, H, W = 3, 32, 32 >>> rng = np.random.RandomState(0) >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int) >>> self = BitmapMasks(masks, height=H, width=W) >>> # demo crop_and_resize >>> num_boxes = 5 >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) >>> out_shape = (14, 14) >>> inds = torch.randint(0, len(self), size=(num_boxes,)) >>> device = 'cpu' >>> interpolation = 'bilinear' >>> new = self.crop_and_resize( ... bboxes, out_shape, inds, device, interpolation) >>> assert len(new) == num_boxes >>> assert new.height, new.width == out_shape """ def __init__(self, masks, height, width): self.height = height self.width = width if len(masks) == 0: self.masks = np.empty((0, self.height, self.width), dtype=np.uint8) else: assert isinstance(masks, (list, np.ndarray)) if isinstance(masks, list): assert isinstance(masks[0], np.ndarray) assert masks[0].ndim == 2 # (H, W) else: assert masks.ndim == 3 # (N, H, W) self.masks = np.stack(masks).reshape(-1, height, width) assert self.masks.shape[1] == self.height assert self.masks.shape[2] == self.width def __getitem__(self, index): """Index the BitmapMask. Args: index (int | ndarray): Indices in the format of integer or ndarray. Returns: :obj:`BitmapMasks`: Indexed bitmap masks. """ masks = self.masks[index].reshape(-1, self.height, self.width) return BitmapMasks(masks, self.height, self.width) def __iter__(self): return iter(self.masks) def __repr__(self): s = self.__class__.__name__ + '(' s += f'num_masks={len(self.masks)}, ' s += f'height={self.height}, ' s += f'width={self.width})' return s def __len__(self): """Number of masks.""" return len(self.masks) def rescale(self, scale, interpolation='nearest'): """See :func:`BaseInstanceMasks.rescale`.""" if len(self.masks) == 0: new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8) else: rescaled_masks = np.stack([ mmcv.imrescale(mask, scale, interpolation=interpolation) for mask in self.masks ]) height, width = rescaled_masks.shape[1:] return BitmapMasks(rescaled_masks, height, width) def resize(self, out_shape, interpolation='nearest'): """See :func:`BaseInstanceMasks.resize`.""" if len(self.masks) == 0: resized_masks = np.empty((0, *out_shape), dtype=np.uint8) else: resized_masks = np.stack([ mmcv.imresize( mask, out_shape[::-1], interpolation=interpolation) for mask in self.masks ]) return BitmapMasks(resized_masks, *out_shape) def flip(self, flip_direction='horizontal'): """See :func:`BaseInstanceMasks.flip`.""" assert flip_direction in ('horizontal', 'vertical', 'diagonal') if len(self.masks) == 0: flipped_masks = self.masks else: flipped_masks = np.stack([ mmcv.imflip(mask, direction=flip_direction) for mask in self.masks ]) return BitmapMasks(flipped_masks, self.height, self.width) def pad(self, out_shape, pad_val=0): """See :func:`BaseInstanceMasks.pad`.""" if len(self.masks) == 0: padded_masks = np.empty((0, *out_shape), dtype=np.uint8) else: padded_masks = np.stack([ mmcv.impad(mask, shape=out_shape, pad_val=pad_val) for mask in self.masks ]) return BitmapMasks(padded_masks, *out_shape) def crop(self, bbox): """See :func:`BaseInstanceMasks.crop`.""" assert isinstance(bbox, np.ndarray) assert bbox.ndim == 1 # clip the boundary bbox = bbox.copy() bbox[0::2] = np.clip(bbox[0::2], 0, self.width) bbox[1::2] = np.clip(bbox[1::2], 0, self.height) x1, y1, x2, y2 = bbox w = np.maximum(x2 - x1, 1) h = np.maximum(y2 - y1, 1) if len(self.masks) == 0: cropped_masks = np.empty((0, h, w), dtype=np.uint8) else: cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w] return BitmapMasks(cropped_masks, h, w) def crop_and_resize(self, bboxes, out_shape, inds, device='cpu', interpolation='bilinear', binarize=True): """See :func:`BaseInstanceMasks.crop_and_resize`.""" if len(self.masks) == 0: empty_masks = np.empty((0, *out_shape), dtype=np.uint8) return BitmapMasks(empty_masks, *out_shape) # convert bboxes to tensor if isinstance(bboxes, np.ndarray): bboxes = torch.from_numpy(bboxes).to(device=device) if isinstance(inds, np.ndarray): inds = torch.from_numpy(inds).to(device=device) num_bbox = bboxes.shape[0] fake_inds = torch.arange( num_bbox, device=device).to(dtype=bboxes.dtype)[:, None] rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5 rois = rois.to(device=device) if num_bbox > 0: gt_masks_th = torch.from_numpy(self.masks).to(device).index_select( 0, inds).to(dtype=rois.dtype) targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape, 1.0, 0, 'avg', True).squeeze(1) if binarize: resized_masks = (targets >= 0.5).cpu().numpy() else: resized_masks = targets.cpu().numpy() else: resized_masks = [] return BitmapMasks(resized_masks, *out_shape) def expand(self, expanded_h, expanded_w, top, left): """See :func:`BaseInstanceMasks.expand`.""" if len(self.masks) == 0: expanded_mask = np.empty((0, expanded_h, expanded_w), dtype=np.uint8) else: expanded_mask = np.zeros((len(self), expanded_h, expanded_w), dtype=np.uint8) expanded_mask[:, top:top + self.height, left:left + self.width] = self.masks return BitmapMasks(expanded_mask, expanded_h, expanded_w) def translate(self, out_shape, offset, direction='horizontal', fill_val=0, interpolation='bilinear'): """Translate the BitmapMasks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). offset (int | float): The offset for translate. direction (str): The translate direction, either "horizontal" or "vertical". fill_val (int | float): Border value. Default 0 for masks. interpolation (str): Same as :func:`mmcv.imtranslate`. Returns: BitmapMasks: Translated BitmapMasks. Example: >>> from mmdet.core.mask.structures import BitmapMasks >>> self = BitmapMasks.random(dtype=np.uint8) >>> out_shape = (32, 32) >>> offset = 4 >>> direction = 'horizontal' >>> fill_val = 0 >>> interpolation = 'bilinear' >>> # Note, There seem to be issues when: >>> # * out_shape is different than self's shape >>> # * the mask dtype is not supported by cv2.AffineWarp >>> new = self.translate(out_shape, offset, direction, fill_val, >>> interpolation) >>> assert len(new) == len(self) >>> assert new.height, new.width == out_shape """ if len(self.masks) == 0: translated_masks = np.empty((0, *out_shape), dtype=np.uint8) else: translated_masks = mmcv.imtranslate( self.masks.transpose((1, 2, 0)), offset, direction, border_value=fill_val, interpolation=interpolation) if translated_masks.ndim == 2: translated_masks = translated_masks[:, :, None] translated_masks = translated_masks.transpose( (2, 0, 1)).astype(self.masks.dtype) return BitmapMasks(translated_masks, *out_shape) def shear(self, out_shape, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """Shear the BitmapMasks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). magnitude (int | float): The magnitude used for shear. direction (str): The shear direction, either "horizontal" or "vertical". border_value (int | tuple[int]): Value used in case of a constant border. interpolation (str): Same as in :func:`mmcv.imshear`. Returns: BitmapMasks: The sheared masks. """ if len(self.masks) == 0: sheared_masks = np.empty((0, *out_shape), dtype=np.uint8) else: sheared_masks = mmcv.imshear( self.masks.transpose((1, 2, 0)), magnitude, direction, border_value=border_value, interpolation=interpolation) if sheared_masks.ndim == 2: sheared_masks = sheared_masks[:, :, None] sheared_masks = sheared_masks.transpose( (2, 0, 1)).astype(self.masks.dtype) return BitmapMasks(sheared_masks, *out_shape) def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): """Rotate the BitmapMasks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). angle (int | float): Rotation angle in degrees. Positive values mean counter-clockwise rotation. center (tuple[float], optional): Center point (w, h) of the rotation in source image. If not specified, the center of the image will be used. scale (int | float): Isotropic scale factor. fill_val (int | float): Border value. Default 0 for masks. Returns: BitmapMasks: Rotated BitmapMasks. """ if len(self.masks) == 0: rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype) else: rotated_masks = mmcv.imrotate( self.masks.transpose((1, 2, 0)), angle, center=center, scale=scale, border_value=fill_val) if rotated_masks.ndim == 2: # case when only one mask, (h, w) rotated_masks = rotated_masks[:, :, None] # (h, w, 1) rotated_masks = rotated_masks.transpose( (2, 0, 1)).astype(self.masks.dtype) return BitmapMasks(rotated_masks, *out_shape) @property def areas(self): """See :py:attr:`BaseInstanceMasks.areas`.""" return self.masks.sum((1, 2)) def to_ndarray(self): """See :func:`BaseInstanceMasks.to_ndarray`.""" return self.masks def to_tensor(self, dtype, device): """See :func:`BaseInstanceMasks.to_tensor`.""" return torch.tensor(self.masks, dtype=dtype, device=device) @classmethod def random(cls, num_masks=3, height=32, width=32, dtype=np.uint8, rng=None): """Generate random bitmap masks for demo / testing purposes. Example: >>> from mmdet.core.mask.structures import BitmapMasks >>> self = BitmapMasks.random() >>> print('self = {}'.format(self)) self = BitmapMasks(num_masks=3, height=32, width=32) """ from mmdet.utils.util_random import ensure_rng rng = ensure_rng(rng) masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype) self = cls(masks, height=height, width=width) return self def get_bboxes(self): num_masks = len(self) boxes = np.zeros((num_masks, 4), dtype=np.float32) x_any = self.masks.any(axis=1) y_any = self.masks.any(axis=2) for idx in range(num_masks): x = np.where(x_any[idx, :])[0] y = np.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: # use +1 for x_max and y_max so that the right and bottom # boundary of instance masks are fully included by the box boxes[idx, :] = np.array([x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32) return boxes class PolygonMasks(BaseInstanceMasks): """This class represents masks in the form of polygons. Polygons is a list of three levels. The first level of the list corresponds to objects, the second level to the polys that compose the object, the third level to the poly coordinates Args: masks (list[list[ndarray]]): The first level of the list corresponds to objects, the second level to the polys that compose the object, the third level to the poly coordinates height (int): height of masks width (int): width of masks Example: >>> from mmdet.core.mask.structures import * # NOQA >>> masks = [ >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ] >>> ] >>> height, width = 16, 16 >>> self = PolygonMasks(masks, height, width) >>> # demo translate >>> new = self.translate((16, 16), 4., direction='horizontal') >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2]) >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4) >>> # demo crop_and_resize >>> num_boxes = 3 >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) >>> out_shape = (16, 16) >>> inds = torch.randint(0, len(self), size=(num_boxes,)) >>> device = 'cpu' >>> interpolation = 'bilinear' >>> new = self.crop_and_resize( ... bboxes, out_shape, inds, device, interpolation) >>> assert len(new) == num_boxes >>> assert new.height, new.width == out_shape """ def __init__(self, masks, height, width): assert isinstance(masks, list) if len(masks) > 0: assert isinstance(masks[0], list) assert isinstance(masks[0][0], np.ndarray) self.height = height self.width = width self.masks = masks def __getitem__(self, index): """Index the polygon masks. Args: index (ndarray | List): The indices. Returns: :obj:`PolygonMasks`: The indexed polygon masks. """ if isinstance(index, np.ndarray): index = index.tolist() if isinstance(index, list): masks = [self.masks[i] for i in index] else: try: masks = self.masks[index] except Exception: raise ValueError( f'Unsupported input of type {type(index)} for indexing!') if len(masks) and isinstance(masks[0], np.ndarray): masks = [masks] # ensure a list of three levels return PolygonMasks(masks, self.height, self.width) def __iter__(self): return iter(self.masks) def __repr__(self): s = self.__class__.__name__ + '(' s += f'num_masks={len(self.masks)}, ' s += f'height={self.height}, ' s += f'width={self.width})' return s def __len__(self): """Number of masks.""" return len(self.masks) def rescale(self, scale, interpolation=None): """see :func:`BaseInstanceMasks.rescale`""" new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) if len(self.masks) == 0: rescaled_masks = PolygonMasks([], new_h, new_w) else: rescaled_masks = self.resize((new_h, new_w)) return rescaled_masks def resize(self, out_shape, interpolation=None): """see :func:`BaseInstanceMasks.resize`""" if len(self.masks) == 0: resized_masks = PolygonMasks([], *out_shape) else: h_scale = out_shape[0] / self.height w_scale = out_shape[1] / self.width resized_masks = [] for poly_per_obj in self.masks: resized_poly = [] for p in poly_per_obj: p = p.copy() p[0::2] = p[0::2] * w_scale p[1::2] = p[1::2] * h_scale resized_poly.append(p) resized_masks.append(resized_poly) resized_masks = PolygonMasks(resized_masks, *out_shape) return resized_masks def flip(self, flip_direction='horizontal'): """see :func:`BaseInstanceMasks.flip`""" assert flip_direction in ('horizontal', 'vertical', 'diagonal') if len(self.masks) == 0: flipped_masks = PolygonMasks([], self.height, self.width) else: flipped_masks = [] for poly_per_obj in self.masks: flipped_poly_per_obj = [] for p in poly_per_obj: p = p.copy() if flip_direction == 'horizontal': p[0::2] = self.width - p[0::2] elif flip_direction == 'vertical': p[1::2] = self.height - p[1::2] else: p[0::2] = self.width - p[0::2] p[1::2] = self.height - p[1::2] flipped_poly_per_obj.append(p) flipped_masks.append(flipped_poly_per_obj) flipped_masks = PolygonMasks(flipped_masks, self.height, self.width) return flipped_masks def crop(self, bbox): """see :func:`BaseInstanceMasks.crop`""" assert isinstance(bbox, np.ndarray) assert bbox.ndim == 1 # clip the boundary bbox = bbox.copy() bbox[0::2] = np.clip(bbox[0::2], 0, self.width) bbox[1::2] = np.clip(bbox[1::2], 0, self.height) x1, y1, x2, y2 = bbox w = np.maximum(x2 - x1, 1) h = np.maximum(y2 - y1, 1) if len(self.masks) == 0: cropped_masks = PolygonMasks([], h, w) else: cropped_masks = [] for poly_per_obj in self.masks: cropped_poly_per_obj = [] for p in poly_per_obj: # pycocotools will clip the boundary p = p.copy() p[0::2] = p[0::2] - bbox[0] p[1::2] = p[1::2] - bbox[1] cropped_poly_per_obj.append(p) cropped_masks.append(cropped_poly_per_obj) cropped_masks = PolygonMasks(cropped_masks, h, w) return cropped_masks def pad(self, out_shape, pad_val=0): """padding has no effect on polygons`""" return PolygonMasks(self.masks, *out_shape) def expand(self, *args, **kwargs): """TODO: Add expand for polygon""" raise NotImplementedError def crop_and_resize(self, bboxes, out_shape, inds, device='cpu', interpolation='bilinear', binarize=True): """see :func:`BaseInstanceMasks.crop_and_resize`""" out_h, out_w = out_shape if len(self.masks) == 0: return PolygonMasks([], out_h, out_w) if not binarize: raise ValueError('Polygons are always binary, ' 'setting binarize=False is unsupported') resized_masks = [] for i in range(len(bboxes)): mask = self.masks[inds[i]] bbox = bboxes[i, :] x1, y1, x2, y2 = bbox w = np.maximum(x2 - x1, 1) h = np.maximum(y2 - y1, 1) h_scale = out_h / max(h, 0.1) # avoid too large scale w_scale = out_w / max(w, 0.1) resized_mask = [] for p in mask: p = p.copy() # crop # pycocotools will clip the boundary p[0::2] = p[0::2] - bbox[0] p[1::2] = p[1::2] - bbox[1] # resize p[0::2] = p[0::2] * w_scale p[1::2] = p[1::2] * h_scale resized_mask.append(p) resized_masks.append(resized_mask) return PolygonMasks(resized_masks, *out_shape) def translate(self, out_shape, offset, direction='horizontal', fill_val=None, interpolation=None): """Translate the PolygonMasks. Example: >>> self = PolygonMasks.random(dtype=np.int) >>> out_shape = (self.height, self.width) >>> new = self.translate(out_shape, 4., direction='horizontal') >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2]) >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501 """ assert fill_val is None or fill_val == 0, 'Here fill_val is not '\ f'used, and defaultly should be None or 0. got {fill_val}.' if len(self.masks) == 0: translated_masks = PolygonMasks([], *out_shape) else: translated_masks = [] for poly_per_obj in self.masks: translated_poly_per_obj = [] for p in poly_per_obj: p = p.copy() if direction == 'horizontal': p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1]) elif direction == 'vertical': p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0]) translated_poly_per_obj.append(p) translated_masks.append(translated_poly_per_obj) translated_masks = PolygonMasks(translated_masks, *out_shape) return translated_masks def shear(self, out_shape, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """See :func:`BaseInstanceMasks.shear`.""" if len(self.masks) == 0: sheared_masks = PolygonMasks([], *out_shape) else: sheared_masks = [] if direction == 'horizontal': shear_matrix = np.stack([[1, magnitude], [0, 1]]).astype(np.float32) elif direction == 'vertical': shear_matrix = np.stack([[1, 0], [magnitude, 1]]).astype(np.float32) for poly_per_obj in self.masks: sheared_poly = [] for p in poly_per_obj: p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n] new_coords = np.matmul(shear_matrix, p) # [2, n] new_coords[0, :] = np.clip(new_coords[0, :], 0, out_shape[1]) new_coords[1, :] = np.clip(new_coords[1, :], 0, out_shape[0]) sheared_poly.append( new_coords.transpose((1, 0)).reshape(-1)) sheared_masks.append(sheared_poly) sheared_masks = PolygonMasks(sheared_masks, *out_shape) return sheared_masks def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): """See :func:`BaseInstanceMasks.rotate`.""" if len(self.masks) == 0: rotated_masks = PolygonMasks([], *out_shape) else: rotated_masks = [] rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale) for poly_per_obj in self.masks: rotated_poly = [] for p in poly_per_obj: p = p.copy() coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2] # pad 1 to convert from format [x, y] to homogeneous # coordinates format [x, y, 1] coords = np.concatenate( (coords, np.ones((coords.shape[0], 1), coords.dtype)), axis=1) # [n, 3] rotated_coords = np.matmul( rotate_matrix[None, :, :], coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2] rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0, out_shape[1]) rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0, out_shape[0]) rotated_poly.append(rotated_coords.reshape(-1)) rotated_masks.append(rotated_poly) rotated_masks = PolygonMasks(rotated_masks, *out_shape) return rotated_masks def to_bitmap(self): """convert polygon masks to bitmap masks.""" bitmap_masks = self.to_ndarray() return BitmapMasks(bitmap_masks, self.height, self.width) @property def areas(self): """Compute areas of masks. This func is modified from `detectron2 <https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_. The function only works with Polygons using the shoelace formula. Return: ndarray: areas of each instance """ # noqa: W501 area = [] for polygons_per_obj in self.masks: area_per_obj = 0 for p in polygons_per_obj: area_per_obj += self._polygon_area(p[0::2], p[1::2]) area.append(area_per_obj) return np.asarray(area) def _polygon_area(self, x, y): """Compute the area of a component of a polygon. Using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Args: x (ndarray): x coordinates of the component y (ndarray): y coordinates of the component Return: float: the are of the component """ # noqa: 501 return 0.5 * np.abs( np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def to_ndarray(self): """Convert masks to the format of ndarray.""" if len(self.masks) == 0: return np.empty((0, self.height, self.width), dtype=np.uint8) bitmap_masks = [] for poly_per_obj in self.masks: bitmap_masks.append( polygon_to_bitmap(poly_per_obj, self.height, self.width)) return np.stack(bitmap_masks) def to_tensor(self, dtype, device): """See :func:`BaseInstanceMasks.to_tensor`.""" if len(self.masks) == 0: return torch.empty((0, self.height, self.width), dtype=dtype, device=device) ndarray_masks = self.to_ndarray() return torch.tensor(ndarray_masks, dtype=dtype, device=device) @classmethod def random(cls, num_masks=3, height=32, width=32, n_verts=5, dtype=np.float32, rng=None): """Generate random polygon masks for demo / testing purposes. Adapted from [1]_ References: .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501 Example: >>> from mmdet.core.mask.structures import PolygonMasks >>> self = PolygonMasks.random() >>> print('self = {}'.format(self)) """ from mmdet.utils.util_random import ensure_rng rng = ensure_rng(rng) def _gen_polygon(n, irregularity, spikeyness): """Creates the polygon by sampling points on a circle around the centre. Random noise is added by varying the angular spacing between sequential points, and by varying the radial distance of each point from the centre. Based on original code by Mike Ounsworth Args: n (int): number of vertices irregularity (float): [0,1] indicating how much variance there is in the angular spacing of vertices. [0,1] will map to [0, 2pi/numberOfVerts] spikeyness (float): [0,1] indicating how much variance there is in each vertex from the circle of radius aveRadius. [0,1] will map to [0, aveRadius] Returns: a list of vertices, in CCW order. """ from scipy.stats import truncnorm # Generate around the unit circle cx, cy = (0.0, 0.0) radius = 1 tau = np.pi * 2 irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n spikeyness = np.clip(spikeyness, 1e-9, 1) # generate n angle steps lower = (tau / n) - irregularity upper = (tau / n) + irregularity angle_steps = rng.uniform(lower, upper, n) # normalize the steps so that point 0 and point n+1 are the same k = angle_steps.sum() / (2 * np.pi) angles = (angle_steps / k).cumsum() + rng.uniform(0, tau) # Convert high and low values to be wrt the standard normal range # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html low = 0 high = 2 * radius mean = radius std = spikeyness a = (low - mean) / std b = (high - mean) / std tnorm = truncnorm(a=a, b=b, loc=mean, scale=std) # now generate the points radii = tnorm.rvs(n, random_state=rng) x_pts = cx + radii * np.cos(angles) y_pts = cy + radii * np.sin(angles) points = np.hstack([x_pts[:, None], y_pts[:, None]]) # Scale to 0-1 space points = points - points.min(axis=0) points = points / points.max(axis=0) # Randomly place within 0-1 space points = points * (rng.rand() * .8 + .2) min_pt = points.min(axis=0) max_pt = points.max(axis=0) high = (1 - max_pt) low = (0 - min_pt) offset = (rng.rand(2) * (high - low)) + low points = points + offset return points def _order_vertices(verts): """ References: https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise """ mlat = verts.T[0].sum() / len(verts) mlng = verts.T[1].sum() / len(verts) tau = np.pi * 2 angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) + tau) % tau sortx = angle.argsort() verts = verts.take(sortx, axis=0) return verts # Generate a random exterior for each requested mask masks = [] for _ in range(num_masks): exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9)) exterior = (exterior * [(width, height)]).astype(dtype) masks.append([exterior.ravel()]) self = cls(masks, height, width) return self def get_bboxes(self): num_masks = len(self) boxes = np.zeros((num_masks, 4), dtype=np.float32) for idx, poly_per_obj in enumerate(self.masks): # simply use a number that is big enough for comparison with # coordinates xy_min = np.array([self.width * 2, self.height * 2], dtype=np.float32) xy_max = np.zeros(2, dtype=np.float32) for p in poly_per_obj: xy = np.array(p).reshape(-1, 2).astype(np.float32) xy_min = np.minimum(xy_min, np.min(xy, axis=0)) xy_max = np.maximum(xy_max, np.max(xy, axis=0)) boxes[idx, :2] = xy_min boxes[idx, 2:] = xy_max return boxes def polygon_to_bitmap(polygons, height, width): """Convert masks from the form of polygons to bitmaps. Args: polygons (list[ndarray]): masks in polygon representation height (int): mask height width (int): mask width Return: ndarray: the converted masks in bitmap representation """ rles = maskUtils.frPyObjects(polygons, height, width) rle = maskUtils.merge(rles) bitmap_mask = maskUtils.decode(rle).astype(np.bool) return bitmap_mask
40,241
36.539179
141
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/mask/mask_target.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from torch.nn.modules.utils import _pair def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, cfg): """Compute mask target for positive proposals in multiple images. Args: pos_proposals_list (list[Tensor]): Positive proposals in multiple images. pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each positive proposals. gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of each image. cfg (dict): Config dict that specifies the mask size. Returns: list[Tensor]: Mask target of each image. Example: >>> import mmcv >>> import mmdet >>> from mmdet.core.mask import BitmapMasks >>> from mmdet.core.mask.mask_target import * >>> H, W = 17, 18 >>> cfg = mmcv.Config({'mask_size': (13, 14)}) >>> rng = np.random.RandomState(0) >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image >>> pos_proposals_list = [ >>> torch.Tensor([ >>> [ 7.2425, 5.5929, 13.9414, 14.9541], >>> [ 7.3241, 3.6170, 16.3850, 15.3102], >>> ]), >>> torch.Tensor([ >>> [ 4.8448, 6.4010, 7.0314, 9.7681], >>> [ 5.9790, 2.6989, 7.4416, 4.8580], >>> [ 0.0000, 0.0000, 0.1398, 9.8232], >>> ]), >>> ] >>> # Corresponding class index for each proposal for each image >>> pos_assigned_gt_inds_list = [ >>> torch.LongTensor([7, 0]), >>> torch.LongTensor([5, 4, 1]), >>> ] >>> # Ground truth mask for each true object for each image >>> gt_masks_list = [ >>> BitmapMasks(rng.rand(8, H, W), height=H, width=W), >>> BitmapMasks(rng.rand(6, H, W), height=H, width=W), >>> ] >>> mask_targets = mask_target( >>> pos_proposals_list, pos_assigned_gt_inds_list, >>> gt_masks_list, cfg) >>> assert mask_targets.shape == (5,) + cfg['mask_size'] """ cfg_list = [cfg for _ in range(len(pos_proposals_list))] mask_targets = map(mask_target_single, pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, cfg_list) mask_targets = list(mask_targets) if len(mask_targets) > 0: mask_targets = torch.cat(mask_targets) return mask_targets def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): """Compute mask target for each positive proposal in the image. Args: pos_proposals (Tensor): Positive proposals. pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals. gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap or Polygon. cfg (dict): Config dict that indicate the mask size. Returns: Tensor: Mask target of each positive proposals in the image. Example: >>> import mmcv >>> import mmdet >>> from mmdet.core.mask import BitmapMasks >>> from mmdet.core.mask.mask_target import * # NOQA >>> H, W = 32, 32 >>> cfg = mmcv.Config({'mask_size': (7, 11)}) >>> rng = np.random.RandomState(0) >>> # Masks for each ground truth box (relative to the image) >>> gt_masks_data = rng.rand(3, H, W) >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W) >>> # Predicted positive boxes in one image >>> pos_proposals = torch.FloatTensor([ >>> [ 16.2, 5.5, 19.9, 20.9], >>> [ 17.3, 13.6, 19.3, 19.3], >>> [ 14.8, 16.4, 17.0, 23.7], >>> [ 0.0, 0.0, 16.0, 16.0], >>> [ 4.0, 0.0, 20.0, 16.0], >>> ]) >>> # For each predicted proposal, its assignment to a gt mask >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1]) >>> mask_targets = mask_target_single( >>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg) >>> assert mask_targets.shape == (5,) + cfg['mask_size'] """ device = pos_proposals.device mask_size = _pair(cfg.mask_size) binarize = not cfg.get('soft_mask_target', False) num_pos = pos_proposals.size(0) if num_pos > 0: proposals_np = pos_proposals.cpu().numpy() maxh, maxw = gt_masks.height, gt_masks.width proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw) proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh) pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() mask_targets = gt_masks.crop_and_resize( proposals_np, mask_size, device=device, inds=pos_assigned_gt_inds, binarize=binarize).to_ndarray() mask_targets = torch.from_numpy(mask_targets).float().to(device) else: mask_targets = pos_proposals.new_zeros((0, ) + mask_size) return mask_targets
5,115
38.96875
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/mask/utils.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import pycocotools.mask as mask_util def split_combined_polys(polys, poly_lens, polys_per_mask): """Split the combined 1-D polys into masks. A mask is represented as a list of polys, and a poly is represented as a 1-D array. In dataset, all masks are concatenated into a single 1-D tensor. Here we need to split the tensor into original representations. Args: polys (list): a list (length = image num) of 1-D tensors poly_lens (list): a list (length = image num) of poly length polys_per_mask (list): a list (length = image num) of poly number of each mask Returns: list: a list (length = image num) of list (length = mask num) of \ list (length = poly num) of numpy array. """ mask_polys_list = [] for img_id in range(len(polys)): polys_single = polys[img_id] polys_lens_single = poly_lens[img_id].tolist() polys_per_mask_single = polys_per_mask[img_id].tolist() split_polys = mmcv.slice_list(polys_single, polys_lens_single) mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single) mask_polys_list.append(mask_polys) return mask_polys_list # TODO: move this function to more proper place def encode_mask_results(mask_results): """Encode bitmap mask to RLE code. Args: mask_results (list | tuple[list]): bitmap mask results. In mask scoring rcnn, mask_results is a tuple of (segm_results, segm_cls_score). Returns: list | tuple: RLE encoded mask. """ if isinstance(mask_results, tuple): # mask scoring cls_segms, cls_mask_scores = mask_results else: cls_segms = mask_results num_classes = len(cls_segms) encoded_mask_results = [[] for _ in range(num_classes)] for i in range(len(cls_segms)): for cls_segm in cls_segms[i]: encoded_mask_results[i].append( mask_util.encode( np.array( cls_segm[:, :, np.newaxis], order='F', dtype='uint8'))[0]) # encoded with RLE if isinstance(mask_results, tuple): return encoded_mask_results, cls_mask_scores else: return encoded_mask_results
2,339
35
75
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/mask/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .mask_target import mask_target from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks from .utils import encode_mask_results, split_combined_polys __all__ = [ 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks', 'PolygonMasks', 'encode_mask_results' ]
351
34.2
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/hook/set_epoch_info_hook.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.parallel import is_module_wrapper from mmcv.runner import HOOKS, Hook @HOOKS.register_module() class SetEpochInfoHook(Hook): """Set runner's epoch information to the model.""" def before_train_epoch(self, runner): epoch = runner.epoch model = runner.model if is_module_wrapper(model): model = model.module model.set_epoch(epoch)
442
26.6875
54
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/hook/sync_random_size_hook.py
# Copyright (c) OpenMMLab. All rights reserved. import random import warnings import torch from mmcv.runner import get_dist_info from mmcv.runner.hooks import HOOKS, Hook from torch import distributed as dist @HOOKS.register_module() class SyncRandomSizeHook(Hook): """Change and synchronize the random image size across ranks. SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve similar functions. Such as `dict(type='Resize', img_scale=[(448, 448), (832, 832)], multiscale_mode='range', keep_ratio=True)`. Note: Due to the multi-process dataloader, its behavior is different from YOLOX's official implementation, the official is to change the size every fixed iteration interval and what we achieved is a fixed epoch interval. Args: ratio_range (tuple[int]): Random ratio range. It will be multiplied by 32, and then change the dataset output image size. Default: (14, 26). img_scale (tuple[int]): Size of input image. Default: (640, 640). interval (int): The epoch interval of change image size. Default: 1. device (torch.device | str): device for returned tensors. Default: 'cuda'. """ def __init__(self, ratio_range=(14, 26), img_scale=(640, 640), interval=1, device='cuda'): warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. ' 'Please use Resize pipeline to achieve similar ' 'functions. Due to the multi-process dataloader, ' 'its behavior is different from YOLOX\'s official ' 'implementation, the official is to change the size ' 'every fixed iteration interval and what we achieved ' 'is a fixed epoch interval.') self.rank, world_size = get_dist_info() self.is_distributed = world_size > 1 self.ratio_range = ratio_range self.img_scale = img_scale self.interval = interval self.device = device def after_train_epoch(self, runner): """Change the dataset output image size.""" if self.ratio_range is not None and (runner.epoch + 1) % self.interval == 0: # Due to DDP and DP get the device behavior inconsistent, # so we did not get the device from runner.model. tensor = torch.LongTensor(2).to(self.device) if self.rank == 0: size_factor = self.img_scale[1] * 1. / self.img_scale[0] size = random.randint(*self.ratio_range) size = (int(32 * size), 32 * int(size * size_factor)) tensor[0] = size[0] tensor[1] = size[1] if self.is_distributed: dist.barrier() dist.broadcast(tensor, 0) runner.data_loader.dataset.update_dynamic_scale( (tensor[0].item(), tensor[1].item()))
3,061
40.945205
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/hook/yolox_lrupdater_hook.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.runner.hooks import HOOKS from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook, annealing_cos) @HOOKS.register_module() class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook): """YOLOX learning rate scheme. There are two main differences between YOLOXLrUpdaterHook and CosineAnnealingLrUpdaterHook. 1. When the current running epoch is greater than `max_epoch-last_epoch`, a fixed learning rate will be used 2. The exp warmup scheme is different with LrUpdaterHook in MMCV Args: num_last_epochs (int): The number of epochs with a fixed learning rate before the end of the training. """ def __init__(self, num_last_epochs, **kwargs): self.num_last_epochs = num_last_epochs super(YOLOXLrUpdaterHook, self).__init__(**kwargs) def get_warmup_lr(self, cur_iters): def _get_warmup_lr(cur_iters, regular_lr): # exp warmup scheme k = self.warmup_ratio * pow( (cur_iters + 1) / float(self.warmup_iters), 2) warmup_lr = [_lr * k for _lr in regular_lr] return warmup_lr if isinstance(self.base_lr, dict): lr_groups = {} for key, base_lr in self.base_lr.items(): lr_groups[key] = _get_warmup_lr(cur_iters, base_lr) return lr_groups else: return _get_warmup_lr(cur_iters, self.base_lr) def get_lr(self, runner, base_lr): last_iter = len(runner.data_loader) * self.num_last_epochs if self.by_epoch: progress = runner.epoch max_progress = runner.max_epochs else: progress = runner.iter max_progress = runner.max_iters progress += 1 if self.min_lr_ratio is not None: target_lr = base_lr * self.min_lr_ratio else: target_lr = self.min_lr if progress >= max_progress - last_iter: # fixed learning rate return target_lr else: return annealing_cos( base_lr, target_lr, (progress - self.warmup_iters) / (max_progress - self.warmup_iters - last_iter))
2,310
32.985294
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/hook/checkloss_hook.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class CheckInvalidLossHook(Hook): """Check invalid loss hook. This hook will regularly check whether the loss is valid during training. Args: interval (int): Checking interval (every k iterations). Default: 50. """ def __init__(self, interval=50): self.interval = interval def after_train_iter(self, runner): if self.every_n_iters(runner, self.interval): assert torch.isfinite(runner.outputs['loss']), \ runner.logger.info('loss become infinite or NaN!')
681
26.28
66
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/hook/sync_norm_hook.py
# Copyright (c) OpenMMLab. All rights reserved. from collections import OrderedDict from mmcv.runner import get_dist_info from mmcv.runner.hooks import HOOKS, Hook from torch import nn from ..utils.dist_utils import all_reduce_dict def get_norm_states(module): async_norm_states = OrderedDict() for name, child in module.named_modules(): if isinstance(child, nn.modules.batchnorm._NormBase): for k, v in child.state_dict().items(): async_norm_states['.'.join([name, k])] = v return async_norm_states @HOOKS.register_module() class SyncNormHook(Hook): """Synchronize Norm states after training epoch, currently used in YOLOX. Args: num_last_epochs (int): The number of latter epochs in the end of the training to switch to synchronizing norm interval. Default: 15. interval (int): Synchronizing norm interval. Default: 1. """ def __init__(self, num_last_epochs=15, interval=1): self.interval = interval self.num_last_epochs = num_last_epochs def before_train_epoch(self, runner): epoch = runner.epoch if (epoch + 1) == runner.max_epochs - self.num_last_epochs: # Synchronize norm every epoch. self.interval = 1 def after_train_epoch(self, runner): """Synchronizing norm.""" epoch = runner.epoch module = runner.model if (epoch + 1) % self.interval == 0: _, world_size = get_dist_info() if world_size == 1: return norm_states = get_norm_states(module) if len(norm_states) == 0: return norm_states = all_reduce_dict(norm_states, op='mean') module.load_state_dict(norm_states, strict=False)
1,789
32.773585
77
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/hook/yolox_mode_switch_hook.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.parallel import is_module_wrapper from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class YOLOXModeSwitchHook(Hook): """Switch the mode of YOLOX during training. This hook turns off the mosaic and mixup data augmentation and switches to use L1 loss in bbox_head. Args: num_last_epochs (int): The number of latter epochs in the end of the training to close the data augmentation and switch to L1 loss. Default: 15. skip_type_keys (list[str], optional): Sequence of type string to be skip pipeline. Default: ('Mosaic', 'RandomAffine', 'MixUp') """ def __init__(self, num_last_epochs=15, skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')): self.num_last_epochs = num_last_epochs self.skip_type_keys = skip_type_keys self._restart_dataloader = False def before_train_epoch(self, runner): """Close mosaic and mixup augmentation and switches to use L1 loss.""" epoch = runner.epoch train_loader = runner.data_loader model = runner.model if is_module_wrapper(model): model = model.module if (epoch + 1) == runner.max_epochs - self.num_last_epochs: runner.logger.info('No mosaic and mixup aug now!') # The dataset pipeline cannot be updated when persistent_workers # is True, so we need to force the dataloader's multi-process # restart. This is a very hacky approach. train_loader.dataset.update_skip_type_keys(self.skip_type_keys) if hasattr(train_loader, 'persistent_workers' ) and train_loader.persistent_workers is True: train_loader._DataLoader__initialized = False train_loader._iterator = None self._restart_dataloader = True runner.logger.info('Add additional L1 loss now!') model.bbox_head.use_l1 = True else: # Once the restart is complete, we need to restore # the initialization flag. if self._restart_dataloader: train_loader._DataLoader__initialized = True
2,270
41.849057
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/hook/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .checkloss_hook import CheckInvalidLossHook from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook from .set_epoch_info_hook import SetEpochInfoHook from .sync_norm_hook import SyncNormHook from .sync_random_size_hook import SyncRandomSizeHook from .yolox_lrupdater_hook import YOLOXLrUpdaterHook from .yolox_mode_switch_hook import YOLOXModeSwitchHook __all__ = [ 'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook', 'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook', 'CheckInvalidLossHook', 'SetEpochInfoHook' ]
610
39.733333
72
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/hook/ema.py
# Copyright (c) OpenMMLab. All rights reserved. import math from mmcv.parallel import is_module_wrapper from mmcv.runner.hooks import HOOKS, Hook class BaseEMAHook(Hook): """Exponential Moving Average Hook. Use Exponential Moving Average on all parameters of model in training process. All parameters have a ema backup, which update by the formula as below. EMAHook takes priority over EvalHook and CheckpointHook. Note, the original model parameters are actually saved in ema field after train. Args: momentum (float): The momentum used for updating ema parameter. Ema's parameter are updated with the formula: `ema_param = (1-momentum) * ema_param + momentum * cur_param`. Defaults to 0.0002. skip_buffers (bool): Whether to skip the model buffers, such as batchnorm running stats (running_mean, running_var), it does not perform the ema operation. Default to False. interval (int): Update ema parameter every interval iteration. Defaults to 1. resume_from (str, optional): The checkpoint path. Defaults to None. momentum_fun (func, optional): The function to change momentum during early iteration (also warmup) to help early training. It uses `momentum` as a constant. Defaults to None. """ def __init__(self, momentum=0.0002, interval=1, skip_buffers=False, resume_from=None, momentum_fun=None): assert 0 < momentum < 1 self.momentum = momentum self.skip_buffers = skip_buffers self.interval = interval self.checkpoint = resume_from self.momentum_fun = momentum_fun def before_run(self, runner): """To resume model with it's ema parameters more friendly. Register ema parameter as ``named_buffer`` to model. """ model = runner.model if is_module_wrapper(model): model = model.module self.param_ema_buffer = {} if self.skip_buffers: self.model_parameters = dict(model.named_parameters()) else: self.model_parameters = model.state_dict() for name, value in self.model_parameters.items(): # "." is not allowed in module's buffer name buffer_name = f"ema_{name.replace('.', '_')}" self.param_ema_buffer[name] = buffer_name model.register_buffer(buffer_name, value.data.clone()) self.model_buffers = dict(model.named_buffers()) if self.checkpoint is not None: runner.resume(self.checkpoint) def get_momentum(self, runner): return self.momentum_fun(runner.iter) if self.momentum_fun else \ self.momentum def after_train_iter(self, runner): """Update ema parameter every self.interval iterations.""" if (runner.iter + 1) % self.interval != 0: return momentum = self.get_momentum(runner) for name, parameter in self.model_parameters.items(): # exclude num_tracking if parameter.dtype.is_floating_point: buffer_name = self.param_ema_buffer[name] buffer_parameter = self.model_buffers[buffer_name] buffer_parameter.mul_(1 - momentum).add_( parameter.data, alpha=momentum) def after_train_epoch(self, runner): """We load parameter values from ema backup to model before the EvalHook.""" self._swap_ema_parameters() def before_train_epoch(self, runner): """We recover model's parameter from ema backup after last epoch's EvalHook.""" self._swap_ema_parameters() def _swap_ema_parameters(self): """Swap the parameter of model with parameter in ema_buffer.""" for name, value in self.model_parameters.items(): temp = value.data.clone() ema_buffer = self.model_buffers[self.param_ema_buffer[name]] value.data.copy_(ema_buffer.data) ema_buffer.data.copy_(temp) @HOOKS.register_module() class ExpMomentumEMAHook(BaseEMAHook): """EMAHook using exponential momentum strategy. Args: total_iter (int): The total number of iterations of EMA momentum. Defaults to 2000. """ def __init__(self, total_iter=2000, **kwargs): super(ExpMomentumEMAHook, self).__init__(**kwargs) self.momentum_fun = lambda x: (1 - self.momentum) * math.exp(-( 1 + x) / total_iter) + self.momentum @HOOKS.register_module() class LinearMomentumEMAHook(BaseEMAHook): """EMAHook using linear momentum strategy. Args: warm_up (int): During first warm_up steps, we may use smaller decay to update ema parameters more slowly. Defaults to 100. """ def __init__(self, warm_up=100, **kwargs): super(LinearMomentumEMAHook, self).__init__(**kwargs) self.momentum_fun = lambda x: min(self.momentum**self.interval, (1 + x) / (warm_up + x))
5,150
38.320611
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/export/model_wrappers.py
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import warnings import numpy as np import torch from mmdet.core import bbox2result from mmdet.models import BaseDetector class DeployBaseDetector(BaseDetector): """DeployBaseDetector.""" def __init__(self, class_names, device_id): super(DeployBaseDetector, self).__init__() self.CLASSES = class_names self.device_id = device_id def simple_test(self, img, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def aug_test(self, imgs, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def extract_feat(self, imgs): raise NotImplementedError('This method is not implemented.') def forward_train(self, imgs, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def val_step(self, data, optimizer): raise NotImplementedError('This method is not implemented.') def train_step(self, data, optimizer): raise NotImplementedError('This method is not implemented.') def forward_test(self, *, img, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def async_simple_test(self, img, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def forward(self, img, img_metas, return_loss=True, **kwargs): outputs = self.forward_test(img, img_metas, **kwargs) batch_dets, batch_labels = outputs[:2] batch_masks = outputs[2] if len(outputs) == 3 else None batch_size = img[0].shape[0] img_metas = img_metas[0] results = [] rescale = kwargs.get('rescale', True) for i in range(batch_size): dets, labels = batch_dets[i], batch_labels[i] if rescale: scale_factor = img_metas[i]['scale_factor'] if isinstance(scale_factor, (list, tuple, np.ndarray)): assert len(scale_factor) == 4 scale_factor = np.array(scale_factor)[None, :] # [1,4] dets[:, :4] /= scale_factor if 'border' in img_metas[i]: # offset pixel of the top-left corners between original image # and padded/enlarged image, 'border' is used when exporting # CornerNet and CentripetalNet to onnx x_off = img_metas[i]['border'][2] y_off = img_metas[i]['border'][0] dets[:, [0, 2]] -= x_off dets[:, [1, 3]] -= y_off dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype) dets_results = bbox2result(dets, labels, len(self.CLASSES)) if batch_masks is not None: masks = batch_masks[i] img_h, img_w = img_metas[i]['img_shape'][:2] ori_h, ori_w = img_metas[i]['ori_shape'][:2] masks = masks[:, :img_h, :img_w] if rescale: masks = masks.astype(np.float32) masks = torch.from_numpy(masks) masks = torch.nn.functional.interpolate( masks.unsqueeze(0), size=(ori_h, ori_w)) masks = masks.squeeze(0).detach().numpy() if masks.dtype != np.bool: masks = masks >= 0.5 segms_results = [[] for _ in range(len(self.CLASSES))] for j in range(len(dets)): segms_results[labels[j]].append(masks[j]) results.append((dets_results, segms_results)) else: results.append(dets_results) return results class ONNXRuntimeDetector(DeployBaseDetector): """Wrapper for detector's inference with ONNXRuntime.""" def __init__(self, onnx_file, class_names, device_id): super(ONNXRuntimeDetector, self).__init__(class_names, device_id) import onnxruntime as ort # get the custom op path ort_custom_op_path = '' try: from mmcv.ops import get_onnxruntime_op_path ort_custom_op_path = get_onnxruntime_op_path() except (ImportError, ModuleNotFoundError): warnings.warn('If input model has custom op from mmcv, \ you may have to build mmcv with ONNXRuntime from source.') session_options = ort.SessionOptions() # register custom op for onnxruntime if osp.exists(ort_custom_op_path): session_options.register_custom_ops_library(ort_custom_op_path) sess = ort.InferenceSession(onnx_file, session_options) providers = ['CPUExecutionProvider'] options = [{}] is_cuda_available = ort.get_device() == 'GPU' if is_cuda_available: providers.insert(0, 'CUDAExecutionProvider') options.insert(0, {'device_id': device_id}) sess.set_providers(providers, options) self.sess = sess self.io_binding = sess.io_binding() self.output_names = [_.name for _ in sess.get_outputs()] self.is_cuda_available = is_cuda_available def forward_test(self, imgs, img_metas, **kwargs): input_data = imgs[0] # set io binding for inputs/outputs device_type = 'cuda' if self.is_cuda_available else 'cpu' if not self.is_cuda_available: input_data = input_data.cpu() self.io_binding.bind_input( name='input', device_type=device_type, device_id=self.device_id, element_type=np.float32, shape=input_data.shape, buffer_ptr=input_data.data_ptr()) for name in self.output_names: self.io_binding.bind_output(name) # run session to get outputs self.sess.run_with_iobinding(self.io_binding) ort_outputs = self.io_binding.copy_outputs_to_cpu() return ort_outputs class TensorRTDetector(DeployBaseDetector): """Wrapper for detector's inference with TensorRT.""" def __init__(self, engine_file, class_names, device_id, output_names=None): super(TensorRTDetector, self).__init__(class_names, device_id) warnings.warn('`output_names` is deprecated and will be removed in ' 'future releases.') from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin try: load_tensorrt_plugin() except (ImportError, ModuleNotFoundError): warnings.warn('If input model has custom op from mmcv, \ you may have to build mmcv with TensorRT from source.') output_names = ['dets', 'labels'] model = TRTWraper(engine_file, ['input'], output_names) with_masks = False # if TensorRT has totally 4 inputs/outputs, then # the detector should have `mask` output. if len(model.engine) == 4: model.output_names = output_names + ['masks'] with_masks = True self.model = model self.with_masks = with_masks def forward_test(self, imgs, img_metas, **kwargs): input_data = imgs[0].contiguous() with torch.cuda.device(self.device_id), torch.no_grad(): outputs = self.model({'input': input_data}) outputs = [outputs[name] for name in self.model.output_names] outputs = [out.detach().cpu().numpy() for out in outputs] return outputs
7,472
39.61413
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/export/pytorch2onnx.py
# Copyright (c) OpenMMLab. All rights reserved. from functools import partial import mmcv import numpy as np import torch from mmcv.runner import load_checkpoint def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config, cfg_options=None): """Prepare sample input and wrap model for ONNX export. The ONNX export API only accept args, and all inputs should be torch.Tensor or corresponding types (such as tuple of tensor). So we should call this function before exporting. This function will: 1. generate corresponding inputs which are used to execute the model. 2. Wrap the model's forward function. For example, the MMDet models' forward function has a parameter ``return_loss:bool``. As we want to set it as False while export API supports neither bool type or kwargs. So we have to replace the forward method like ``model.forward = partial(model.forward, return_loss=False)``. Args: config_path (str): the OpenMMLab config for the model we want to export to ONNX checkpoint_path (str): Path to the corresponding checkpoint input_config (dict): the exactly data in this dict depends on the framework. For MMSeg, we can just declare the input shape, and generate the dummy data accordingly. However, for MMDet, we may pass the real img path, or the NMS will return None as there is no legal bbox. Returns: tuple: (model, tensor_data) wrapped model which can be called by ``model(*tensor_data)`` and a list of inputs which are used to execute the model while exporting. """ model = build_model_from_cfg( config_path, checkpoint_path, cfg_options=cfg_options) one_img, one_meta = preprocess_example_input(input_config) tensor_data = [one_img] model.forward = partial( model.forward, img_metas=[[one_meta]], return_loss=False) # pytorch has some bug in pytorch1.3, we have to fix it # by replacing these existing op opset_version = 11 # put the import within the function thus it will not cause import error # when not using this function try: from mmcv.onnx.symbolic import register_extra_symbolics except ModuleNotFoundError: raise NotImplementedError('please update mmcv to version>=v1.0.4') register_extra_symbolics(opset_version) return model, tensor_data def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None): """Build a model from config and load the given checkpoint. Args: config_path (str): the OpenMMLab config for the model we want to export to ONNX checkpoint_path (str): Path to the corresponding checkpoint Returns: torch.nn.Module: the built model """ from mmdet.models import build_detector cfg = mmcv.Config.fromfile(config_path) if cfg_options is not None: cfg.merge_from_dict(cfg_options) # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None cfg.data.test.test_mode = True # build the model cfg.model.train_cfg = None model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) checkpoint = load_checkpoint(model, checkpoint_path, map_location='cpu') if 'CLASSES' in checkpoint.get('meta', {}): model.CLASSES = checkpoint['meta']['CLASSES'] else: from mmdet.datasets import DATASETS dataset = DATASETS.get(cfg.data.test['type']) assert (dataset is not None) model.CLASSES = dataset.CLASSES model.cpu().eval() return model def preprocess_example_input(input_config): """Prepare an example input image for ``generate_inputs_and_wrap_model``. Args: input_config (dict): customized config describing the example input. Returns: tuple: (one_img, one_meta), tensor of the example input image and \ meta information for the example input image. Examples: >>> from mmdet.core.export import preprocess_example_input >>> input_config = { >>> 'input_shape': (1,3,224,224), >>> 'input_path': 'demo/demo.jpg', >>> 'normalize_cfg': { >>> 'mean': (123.675, 116.28, 103.53), >>> 'std': (58.395, 57.12, 57.375) >>> } >>> } >>> one_img, one_meta = preprocess_example_input(input_config) >>> print(one_img.shape) torch.Size([1, 3, 224, 224]) >>> print(one_meta) {'img_shape': (224, 224, 3), 'ori_shape': (224, 224, 3), 'pad_shape': (224, 224, 3), 'filename': '<demo>.png', 'scale_factor': 1.0, 'flip': False} """ input_path = input_config['input_path'] input_shape = input_config['input_shape'] one_img = mmcv.imread(input_path) one_img = mmcv.imresize(one_img, input_shape[2:][::-1]) show_img = one_img.copy() if 'normalize_cfg' in input_config.keys(): normalize_cfg = input_config['normalize_cfg'] mean = np.array(normalize_cfg['mean'], dtype=np.float32) std = np.array(normalize_cfg['std'], dtype=np.float32) to_rgb = normalize_cfg.get('to_rgb', True) one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb) one_img = one_img.transpose(2, 0, 1) one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_( True) (_, C, H, W) = input_shape one_meta = { 'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '<demo>.png', 'scale_factor': np.ones(4, dtype=np.float32), 'flip': False, 'show_img': show_img, 'flip_direction': None } return one_img, one_meta
5,995
36.475
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/export/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx, get_k_for_topk) from .pytorch2onnx import (build_model_from_cfg, generate_inputs_and_wrap_model, preprocess_example_input) __all__ = [ 'build_model_from_cfg', 'generate_inputs_and_wrap_model', 'preprocess_example_input', 'get_k_for_topk', 'add_dummy_nms_for_onnx', 'dynamic_clip_for_onnx' ]
505
37.923077
75
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/export/onnx_helper.py
# Copyright (c) OpenMMLab. All rights reserved. import os import torch def dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape): """Clip boxes dynamically for onnx. Since torch.clamp cannot have dynamic `min` and `max`, we scale the boxes by 1/max_shape and clamp in the range [0, 1]. Args: x1 (Tensor): The x1 for bounding boxes. y1 (Tensor): The y1 for bounding boxes. x2 (Tensor): The x2 for bounding boxes. y2 (Tensor): The y2 for bounding boxes. max_shape (Tensor or torch.Size): The (H,W) of original image. Returns: tuple(Tensor): The clipped x1, y1, x2, y2. """ assert isinstance( max_shape, torch.Tensor), '`max_shape` should be tensor of (h,w) for onnx' # scale by 1/max_shape x1 = x1 / max_shape[1] y1 = y1 / max_shape[0] x2 = x2 / max_shape[1] y2 = y2 / max_shape[0] # clamp [0, 1] x1 = torch.clamp(x1, 0, 1) y1 = torch.clamp(y1, 0, 1) x2 = torch.clamp(x2, 0, 1) y2 = torch.clamp(y2, 0, 1) # scale back x1 = x1 * max_shape[1] y1 = y1 * max_shape[0] x2 = x2 * max_shape[1] y2 = y2 * max_shape[0] return x1, y1, x2, y2 def get_k_for_topk(k, size): """Get k of TopK for onnx exporting. The K of TopK in TensorRT should not be a Tensor, while in ONNX Runtime it could be a Tensor.Due to dynamic shape feature, we have to decide whether to do TopK and what K it should be while exporting to ONNX. If returned K is less than zero, it means we do not have to do TopK operation. Args: k (int or Tensor): The set k value for nms from config file. size (Tensor or torch.Size): The number of elements of \ TopK's input tensor Returns: tuple: (int or Tensor): The final K for TopK. """ ret_k = -1 if k <= 0 or size <= 0: return ret_k if torch.onnx.is_in_onnx_export(): is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT' if is_trt_backend: # TensorRT does not support dynamic K with TopK op if 0 < k < size: ret_k = k else: # Always keep topk op for dynamic input in onnx for ONNX Runtime ret_k = torch.where(k < size, k, size) elif k < size: ret_k = k else: # ret_k is -1 pass return ret_k def add_dummy_nms_for_onnx(boxes, scores, max_output_boxes_per_class=1000, iou_threshold=0.5, score_threshold=0.05, pre_top_k=-1, after_top_k=-1, labels=None): """Create a dummy onnx::NonMaxSuppression op while exporting to ONNX. This function helps exporting to onnx with batch and multiclass NMS op. It only supports class-agnostic detection results. That is, the scores is of shape (N, num_bboxes, num_classes) and the boxes is of shape (N, num_boxes, 4). Args: boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4] scores (Tensor): The detection scores of shape [N, num_boxes, num_classes] max_output_boxes_per_class (int): Maximum number of output boxes per class of nms. Defaults to 1000. iou_threshold (float): IOU threshold of nms. Defaults to 0.5 score_threshold (float): score threshold of nms. Defaults to 0.05. pre_top_k (bool): Number of top K boxes to keep before nms. Defaults to -1. after_top_k (int): Number of top K boxes to keep after nms. Defaults to -1. labels (Tensor, optional): It not None, explicit labels would be used. Otherwise, labels would be automatically generated using num_classed. Defaults to None. Returns: tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class labels of shape [N, num_det]. """ max_output_boxes_per_class = torch.LongTensor([max_output_boxes_per_class]) iou_threshold = torch.tensor([iou_threshold], dtype=torch.float32) score_threshold = torch.tensor([score_threshold], dtype=torch.float32) batch_size = scores.shape[0] num_class = scores.shape[2] nms_pre = torch.tensor(pre_top_k, device=scores.device, dtype=torch.long) nms_pre = get_k_for_topk(nms_pre, boxes.shape[1]) if nms_pre > 0: max_scores, _ = scores.max(-1) _, topk_inds = max_scores.topk(nms_pre) batch_inds = torch.arange(batch_size).view( -1, 1).expand_as(topk_inds).long() # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 transformed_inds = boxes.shape[1] * batch_inds + topk_inds boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape( batch_size, -1, 4) scores = scores.reshape(-1, num_class)[transformed_inds, :].reshape( batch_size, -1, num_class) if labels is not None: labels = labels.reshape(-1, 1)[transformed_inds].reshape( batch_size, -1) scores = scores.permute(0, 2, 1) num_box = boxes.shape[1] # turn off tracing to create a dummy output of nms state = torch._C._get_tracing_state() # dummy indices of nms's output num_fake_det = 2 batch_inds = torch.randint(batch_size, (num_fake_det, 1)) cls_inds = torch.randint(num_class, (num_fake_det, 1)) box_inds = torch.randint(num_box, (num_fake_det, 1)) indices = torch.cat([batch_inds, cls_inds, box_inds], dim=1) output = indices setattr(DummyONNXNMSop, 'output', output) # open tracing torch._C._set_tracing_state(state) selected_indices = DummyONNXNMSop.apply(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) batch_inds, cls_inds = selected_indices[:, 0], selected_indices[:, 1] box_inds = selected_indices[:, 2] if labels is None: labels = torch.arange(num_class, dtype=torch.long).to(scores.device) labels = labels.view(1, num_class, 1).expand_as(scores) scores = scores.reshape(-1, 1) boxes = boxes.reshape(batch_size, -1).repeat(1, num_class).reshape(-1, 4) pos_inds = (num_class * batch_inds + cls_inds) * num_box + box_inds mask = scores.new_zeros(scores.shape) # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 # PyTorch style code: mask[batch_inds, box_inds] += 1 mask[pos_inds, :] += 1 scores = scores * mask boxes = boxes * mask scores = scores.reshape(batch_size, -1) boxes = boxes.reshape(batch_size, -1, 4) labels = labels.reshape(batch_size, -1) nms_after = torch.tensor( after_top_k, device=scores.device, dtype=torch.long) nms_after = get_k_for_topk(nms_after, num_box * num_class) if nms_after > 0: _, topk_inds = scores.topk(nms_after) batch_inds = torch.arange(batch_size).view(-1, 1).expand_as(topk_inds) # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 transformed_inds = scores.shape[1] * batch_inds + topk_inds scores = scores.reshape(-1, 1)[transformed_inds, :].reshape( batch_size, -1) boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape( batch_size, -1, 4) labels = labels.reshape(-1, 1)[transformed_inds, :].reshape( batch_size, -1) scores = scores.unsqueeze(2) dets = torch.cat([boxes, scores], dim=2) return dets, labels class DummyONNXNMSop(torch.autograd.Function): """DummyONNXNMSop. This class is only for creating onnx::NonMaxSuppression. """ @staticmethod def forward(ctx, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): return DummyONNXNMSop.output @staticmethod def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): return g.op( 'NonMaxSuppression', boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, outputs=1)
8,367
36.357143
98
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/demodata.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmdet.utils.util_random import ensure_rng def random_boxes(num=1, scale=1, rng=None): """Simple version of ``kwimage.Boxes.random`` Returns: Tensor: shape (n, 4) in x1, y1, x2, y2 format. References: https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 Example: >>> num = 3 >>> scale = 512 >>> rng = 0 >>> boxes = random_boxes(num, scale, rng) >>> print(boxes) tensor([[280.9925, 278.9802, 308.6148, 366.1769], [216.9113, 330.6978, 224.0446, 456.5878], [405.3632, 196.3221, 493.3953, 270.7942]]) """ rng = ensure_rng(rng) tlbr = rng.rand(num, 4).astype(np.float32) tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) tlbr[:, 0] = tl_x * scale tlbr[:, 1] = tl_y * scale tlbr[:, 2] = br_x * scale tlbr[:, 3] = br_y * scale boxes = torch.from_numpy(tlbr) return boxes
1,181
26.488372
101
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner, MaxIoUAssigner, RegionAssigner) from .builder import build_assigner, build_bbox_coder, build_sampler from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder, PseudoBBoxCoder, TBLRBBoxCoder) from .iou_calculators import BboxOverlaps2D, bbox_overlaps from .samplers import (BaseSampler, CombinedSampler, InstanceBalancedPosSampler, IoUBalancedNegSampler, OHEMSampler, PseudoSampler, RandomSampler, SamplingResult, ScoreHLRSampler) from .transforms import (bbox2distance, bbox2result, bbox2roi, bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping, bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh, distance2bbox, find_inside_bboxes, roi2bbox) __all__ = [ 'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler', 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner', 'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance', 'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder', 'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes' ]
1,688
57.241379
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/builder.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.utils import Registry, build_from_cfg BBOX_ASSIGNERS = Registry('bbox_assigner') BBOX_SAMPLERS = Registry('bbox_sampler') BBOX_CODERS = Registry('bbox_coder') def build_assigner(cfg, **default_args): """Builder of box assigner.""" return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args) def build_sampler(cfg, **default_args): """Builder of box sampler.""" return build_from_cfg(cfg, BBOX_SAMPLERS, default_args) def build_bbox_coder(cfg, **default_args): """Builder of box coder.""" return build_from_cfg(cfg, BBOX_CODERS, default_args)
628
27.590909
60
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/transforms.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch def find_inside_bboxes(bboxes, img_h, img_w): """Find bboxes as long as a part of bboxes is inside the image. Args: bboxes (Tensor): Shape (N, 4). img_h (int): Image height. img_w (int): Image width. Returns: Tensor: Index of the remaining bboxes. """ inside_inds = (bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0) \ & (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0) return inside_inds def bbox_flip(bboxes, img_shape, direction='horizontal'): """Flip bboxes horizontally or vertically. Args: bboxes (Tensor): Shape (..., 4*k) img_shape (tuple): Image shape. direction (str): Flip direction, options are "horizontal", "vertical", "diagonal". Default: "horizontal" Returns: Tensor: Flipped bboxes. """ assert bboxes.shape[-1] % 4 == 0 assert direction in ['horizontal', 'vertical', 'diagonal'] flipped = bboxes.clone() if direction == 'horizontal': flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] elif direction == 'vertical': flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] else: flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] return flipped def bbox_mapping(bboxes, img_shape, scale_factor, flip, flip_direction='horizontal'): """Map bboxes from the original image scale to testing scale.""" new_bboxes = bboxes * bboxes.new_tensor(scale_factor) if flip: new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction) return new_bboxes def bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction='horizontal'): """Map bboxes from testing scale to original image scale.""" new_bboxes = bbox_flip(bboxes, img_shape, flip_direction) if flip else bboxes new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor) return new_bboxes.view(bboxes.shape) def bbox2roi(bbox_list): """Convert a list of bboxes to roi format. Args: bbox_list (list[Tensor]): a list of bboxes corresponding to a batch of images. Returns: Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] """ rois_list = [] for img_id, bboxes in enumerate(bbox_list): if bboxes.size(0) > 0: img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) else: rois = bboxes.new_zeros((0, 5)) rois_list.append(rois) rois = torch.cat(rois_list, 0) return rois def roi2bbox(rois): """Convert rois to bounding box format. Args: rois (torch.Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: list[torch.Tensor]: Converted boxes of corresponding rois. """ bbox_list = [] img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) for img_id in img_ids: inds = (rois[:, 0] == img_id.item()) bbox = rois[inds, 1:] bbox_list.append(bbox) return bbox_list def bbox2result(bboxes, labels, num_classes): """Convert detection results to a list of numpy arrays. Args: bboxes (torch.Tensor | np.ndarray): shape (n, 5) labels (torch.Tensor | np.ndarray): shape (n, ) num_classes (int): class number, including background class Returns: list(ndarray): bbox results of each class """ if bboxes.shape[0] == 0: return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] else: if isinstance(bboxes, torch.Tensor): bboxes = bboxes.detach().cpu().numpy() labels = labels.detach().cpu().numpy() return [bboxes[labels == i, :] for i in range(num_classes)] def distance2bbox(points, distance, max_shape=None): """Decode distance prediction to bounding box. Args: points (Tensor): Shape (B, N, 2) or (N, 2). distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4) """ x1 = points[..., 0] - distance[..., 0] y1 = points[..., 1] - distance[..., 1] x2 = points[..., 0] + distance[..., 2] y2 = points[..., 1] + distance[..., 3] bboxes = torch.stack([x1, y1, x2, y2], -1) if max_shape is not None: if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export(): # speed up bboxes[:, 0::2].clamp_(min=0, max=max_shape[1]) bboxes[:, 1::2].clamp_(min=0, max=max_shape[0]) return bboxes # clip bboxes with dynamic `min` and `max` for onnx if torch.onnx.is_in_onnx_export(): from mmdet.core.export import dynamic_clip_for_onnx x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) bboxes = torch.stack([x1, y1, x2, y2], dim=-1) return bboxes if not isinstance(max_shape, torch.Tensor): max_shape = x1.new_tensor(max_shape) max_shape = max_shape[..., :2].type_as(x1) if max_shape.ndim == 2: assert bboxes.ndim == 3 assert max_shape.size(0) == bboxes.size(0) min_xy = x1.new_tensor(0) max_xy = torch.cat([max_shape, max_shape], dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) return bboxes def bbox2distance(points, bbox, max_dis=None, eps=0.1): """Decode bounding box based on distances. Args: points (Tensor): Shape (n, 2), [x, y]. bbox (Tensor): Shape (n, 4), "xyxy" format max_dis (float): Upper bound of the distance. eps (float): a small value to ensure target < max_dis, instead <= Returns: Tensor: Decoded distances. """ left = points[:, 0] - bbox[:, 0] top = points[:, 1] - bbox[:, 1] right = bbox[:, 2] - points[:, 0] bottom = bbox[:, 3] - points[:, 1] if max_dis is not None: left = left.clamp(min=0, max=max_dis - eps) top = top.clamp(min=0, max=max_dis - eps) right = right.clamp(min=0, max=max_dis - eps) bottom = bottom.clamp(min=0, max=max_dis - eps) return torch.stack([left, top, right, bottom], -1) def bbox_rescale(bboxes, scale_factor=1.0): """Rescale bounding box w.r.t. scale_factor. Args: bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois scale_factor (float): rescale factor Returns: Tensor: Rescaled bboxes. """ if bboxes.size(1) == 5: bboxes_ = bboxes[:, 1:] inds_ = bboxes[:, 0] else: bboxes_ = bboxes cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5 cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5 w = bboxes_[:, 2] - bboxes_[:, 0] h = bboxes_[:, 3] - bboxes_[:, 1] w = w * scale_factor h = h * scale_factor x1 = cx - 0.5 * w x2 = cx + 0.5 * w y1 = cy - 0.5 * h y2 = cy + 0.5 * h if bboxes.size(1) == 5: rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1) else: rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) return rescaled_bboxes def bbox_cxcywh_to_xyxy(bbox): """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). Args: bbox (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Converted bboxes. """ cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1) bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)] return torch.cat(bbox_new, dim=-1) def bbox_xyxy_to_cxcywh(bbox): """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). Args: bbox (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Converted bboxes. """ x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] return torch.cat(bbox_new, dim=-1)
8,962
32.073801
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/assign_result.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.utils import util_mixins class AssignResult(util_mixins.NiceRepr): """Stores assignments between predicted and truth boxes. Attributes: num_gts (int): the number of truth boxes considered when computing this assignment gt_inds (LongTensor): for each predicted box indicates the 1-based index of the assigned truth box. 0 means unassigned and -1 means ignore. max_overlaps (FloatTensor): the iou between the predicted box and its assigned truth box. labels (None | LongTensor): If specified, for each predicted box indicates the category label of the assigned truth box. Example: >>> # An assign result between 4 predicted boxes and 9 true boxes >>> # where only two boxes were assigned. >>> num_gts = 9 >>> max_overlaps = torch.LongTensor([0, .5, .9, 0]) >>> gt_inds = torch.LongTensor([-1, 1, 2, 0]) >>> labels = torch.LongTensor([0, 3, 4, 0]) >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels) >>> print(str(self)) # xdoctest: +IGNORE_WANT <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,), labels.shape=(4,))> >>> # Force addition of gt labels (when adding gt as proposals) >>> new_labels = torch.LongTensor([3, 4, 5]) >>> self.add_gt_(new_labels) >>> print(str(self)) # xdoctest: +IGNORE_WANT <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,), labels.shape=(7,))> """ def __init__(self, num_gts, gt_inds, max_overlaps, labels=None): self.num_gts = num_gts self.gt_inds = gt_inds self.max_overlaps = max_overlaps self.labels = labels # Interface for possible user-defined properties self._extra_properties = {} @property def num_preds(self): """int: the number of predictions in this assignment""" return len(self.gt_inds) def set_extra_property(self, key, value): """Set user-defined new property.""" assert key not in self.info self._extra_properties[key] = value def get_extra_property(self, key): """Get user-defined property.""" return self._extra_properties.get(key, None) @property def info(self): """dict: a dictionary of info about the object""" basic_info = { 'num_gts': self.num_gts, 'num_preds': self.num_preds, 'gt_inds': self.gt_inds, 'max_overlaps': self.max_overlaps, 'labels': self.labels, } basic_info.update(self._extra_properties) return basic_info def __nice__(self): """str: a "nice" summary string describing this assign result""" parts = [] parts.append(f'num_gts={self.num_gts!r}') if self.gt_inds is None: parts.append(f'gt_inds={self.gt_inds!r}') else: parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}') if self.max_overlaps is None: parts.append(f'max_overlaps={self.max_overlaps!r}') else: parts.append('max_overlaps.shape=' f'{tuple(self.max_overlaps.shape)!r}') if self.labels is None: parts.append(f'labels={self.labels!r}') else: parts.append(f'labels.shape={tuple(self.labels.shape)!r}') return ', '.join(parts) @classmethod def random(cls, **kwargs): """Create random AssignResult for tests or debugging. Args: num_preds: number of predicted boxes num_gts: number of true boxes p_ignore (float): probability of a predicted box assigned to an ignored truth p_assigned (float): probability of a predicted box not being assigned p_use_label (float | bool): with labels or not rng (None | int | numpy.random.RandomState): seed or state Returns: :obj:`AssignResult`: Randomly generated assign results. Example: >>> from mmdet.core.bbox.assigners.assign_result import * # NOQA >>> self = AssignResult.random() >>> print(self.info) """ from mmdet.core.bbox import demodata rng = demodata.ensure_rng(kwargs.get('rng', None)) num_gts = kwargs.get('num_gts', None) num_preds = kwargs.get('num_preds', None) p_ignore = kwargs.get('p_ignore', 0.3) p_assigned = kwargs.get('p_assigned', 0.7) p_use_label = kwargs.get('p_use_label', 0.5) num_classes = kwargs.get('p_use_label', 3) if num_gts is None: num_gts = rng.randint(0, 8) if num_preds is None: num_preds = rng.randint(0, 16) if num_gts == 0: max_overlaps = torch.zeros(num_preds, dtype=torch.float32) gt_inds = torch.zeros(num_preds, dtype=torch.int64) if p_use_label is True or p_use_label < rng.rand(): labels = torch.zeros(num_preds, dtype=torch.int64) else: labels = None else: import numpy as np # Create an overlap for each predicted box max_overlaps = torch.from_numpy(rng.rand(num_preds)) # Construct gt_inds for each predicted box is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned) # maximum number of assignments constraints n_assigned = min(num_preds, min(num_gts, is_assigned.sum())) assigned_idxs = np.where(is_assigned)[0] rng.shuffle(assigned_idxs) assigned_idxs = assigned_idxs[0:n_assigned] assigned_idxs.sort() is_assigned[:] = 0 is_assigned[assigned_idxs] = True is_ignore = torch.from_numpy( rng.rand(num_preds) < p_ignore) & is_assigned gt_inds = torch.zeros(num_preds, dtype=torch.int64) true_idxs = np.arange(num_gts) rng.shuffle(true_idxs) true_idxs = torch.from_numpy(true_idxs) gt_inds[is_assigned] = true_idxs[:n_assigned] gt_inds = torch.from_numpy( rng.randint(1, num_gts + 1, size=num_preds)) gt_inds[is_ignore] = -1 gt_inds[~is_assigned] = 0 max_overlaps[~is_assigned] = 0 if p_use_label is True or p_use_label < rng.rand(): if num_classes == 0: labels = torch.zeros(num_preds, dtype=torch.int64) else: labels = torch.from_numpy( # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class rng.randint(0, num_classes, size=num_preds)) labels[~is_assigned] = 0 else: labels = None self = cls(num_gts, gt_inds, max_overlaps, labels) return self def add_gt_(self, gt_labels): """Add ground truth as assigned results. Args: gt_labels (torch.Tensor): Labels of gt boxes """ self_inds = torch.arange( 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) self.gt_inds = torch.cat([self_inds, self.gt_inds]) self.max_overlaps = torch.cat( [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]) if self.labels is not None: self.labels = torch.cat([gt_labels, self.labels])
7,753
36.640777
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/sim_ota_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn.functional as F from ..builder import BBOX_ASSIGNERS from ..iou_calculators import bbox_overlaps from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class SimOTAAssigner(BaseAssigner): """Computes matching between predictions and ground truth. Args: center_radius (int | float, optional): Ground truth center size to judge whether a prior is in center. Default 2.5. candidate_topk (int, optional): The candidate top-k which used to get top-k ious to calculate dynamic-k. Default 10. iou_weight (int | float, optional): The scale factor for regression iou cost. Default 3.0. cls_weight (int | float, optional): The scale factor for classification cost. Default 1.0. """ def __init__(self, center_radius=2.5, candidate_topk=10, iou_weight=3.0, cls_weight=1.0): self.center_radius = center_radius self.candidate_topk = candidate_topk self.iou_weight = iou_weight self.cls_weight = cls_weight def assign(self, pred_scores, priors, decoded_bboxes, gt_bboxes, gt_labels, gt_bboxes_ignore=None, eps=1e-7): """Assign gt to priors using SimOTA. It will switch to CPU mode when GPU is out of memory. Args: pred_scores (Tensor): Classification scores of one image, a 2D-Tensor with shape [num_priors, num_classes] priors (Tensor): All priors of one image, a 2D-Tensor with shape [num_priors, 4] in [cx, xy, stride_w, stride_y] format. decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. gt_labels (Tensor): Ground truth labels of one image, a Tensor with shape [num_gts]. gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. eps (float): A value added to the denominator for numerical stability. Default 1e-7. Returns: assign_result (obj:`AssignResult`): The assigned result. """ try: assign_result = self._assign(pred_scores, priors, decoded_bboxes, gt_bboxes, gt_labels, gt_bboxes_ignore, eps) return assign_result except RuntimeError: origin_device = pred_scores.device warnings.warn('OOM RuntimeError is raised due to the huge memory ' 'cost during label assignment. CPU mode is applied ' 'in this batch. If you want to avoid this issue, ' 'try to reduce the batch size or image size.') torch.cuda.empty_cache() pred_scores = pred_scores.cpu() priors = priors.cpu() decoded_bboxes = decoded_bboxes.cpu() gt_bboxes = gt_bboxes.cpu().float() gt_labels = gt_labels.cpu() assign_result = self._assign(pred_scores, priors, decoded_bboxes, gt_bboxes, gt_labels, gt_bboxes_ignore, eps) assign_result.gt_inds = assign_result.gt_inds.to(origin_device) assign_result.max_overlaps = assign_result.max_overlaps.to( origin_device) assign_result.labels = assign_result.labels.to(origin_device) return assign_result def _assign(self, pred_scores, priors, decoded_bboxes, gt_bboxes, gt_labels, gt_bboxes_ignore=None, eps=1e-7): """Assign gt to priors using SimOTA. Args: pred_scores (Tensor): Classification scores of one image, a 2D-Tensor with shape [num_priors, num_classes] priors (Tensor): All priors of one image, a 2D-Tensor with shape [num_priors, 4] in [cx, xy, stride_w, stride_y] format. decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. gt_labels (Tensor): Ground truth labels of one image, a Tensor with shape [num_gts]. gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. eps (float): A value added to the denominator for numerical stability. Default 1e-7. Returns: :obj:`AssignResult`: The assigned result. """ INF = 100000000 num_gt = gt_bboxes.size(0) num_bboxes = decoded_bboxes.size(0) # assign 0 by default assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), 0, dtype=torch.long) valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info( priors, gt_bboxes) valid_decoded_bbox = decoded_bboxes[valid_mask] valid_pred_scores = pred_scores[valid_mask] num_valid = valid_decoded_bbox.size(0) if num_gt == 0 or num_bboxes == 0 or num_valid == 0: # No ground truth or boxes, return empty assignment max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) if num_gt == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = decoded_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) pairwise_ious = bbox_overlaps(valid_decoded_bbox, gt_bboxes) iou_cost = -torch.log(pairwise_ious + eps) gt_onehot_label = ( F.one_hot(gt_labels.to(torch.int64), pred_scores.shape[-1]).float().unsqueeze(0).repeat( num_valid, 1, 1)) valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1) cls_cost = F.binary_cross_entropy( valid_pred_scores.sqrt_(), gt_onehot_label, reduction='none').sum(-1) cost_matrix = ( cls_cost * self.cls_weight + iou_cost * self.iou_weight + (~is_in_boxes_and_center) * INF) matched_pred_ious, matched_gt_inds = \ self.dynamic_k_matching( cost_matrix, pairwise_ious, num_gt, valid_mask) # convert to AssignResult format assigned_gt_inds[valid_mask] = matched_gt_inds + 1 assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), -INF, dtype=torch.float32) max_overlaps[valid_mask] = matched_pred_ious return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) def get_in_gt_and_in_center_info(self, priors, gt_bboxes): num_gt = gt_bboxes.size(0) repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt) repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt) repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt) repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt) # is prior centers in gt bboxes, shape: [n_prior, n_gt] l_ = repeated_x - gt_bboxes[:, 0] t_ = repeated_y - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - repeated_x b_ = gt_bboxes[:, 3] - repeated_y deltas = torch.stack([l_, t_, r_, b_], dim=1) is_in_gts = deltas.min(dim=1).values > 0 is_in_gts_all = is_in_gts.sum(dim=1) > 0 # is prior centers in gt centers gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 ct_box_l = gt_cxs - self.center_radius * repeated_stride_x ct_box_t = gt_cys - self.center_radius * repeated_stride_y ct_box_r = gt_cxs + self.center_radius * repeated_stride_x ct_box_b = gt_cys + self.center_radius * repeated_stride_y cl_ = repeated_x - ct_box_l ct_ = repeated_y - ct_box_t cr_ = ct_box_r - repeated_x cb_ = ct_box_b - repeated_y ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1) is_in_cts = ct_deltas.min(dim=1).values > 0 is_in_cts_all = is_in_cts.sum(dim=1) > 0 # in boxes or in centers, shape: [num_priors] is_in_gts_or_centers = is_in_gts_all | is_in_cts_all # both in boxes and centers, shape: [num_fg, num_gt] is_in_boxes_and_centers = ( is_in_gts[is_in_gts_or_centers, :] & is_in_cts[is_in_gts_or_centers, :]) return is_in_gts_or_centers, is_in_boxes_and_centers def dynamic_k_matching(self, cost, pairwise_ious, num_gt, valid_mask): matching_matrix = torch.zeros_like(cost) # select candidate topk ious for dynamic-k calculation candidate_topk = min(self.candidate_topk, pairwise_ious.size(0)) topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) # calculate dynamic k for each gt dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) for gt_idx in range(num_gt): _, pos_idx = torch.topk( cost[:, gt_idx], k=dynamic_ks[gt_idx].item(), largest=False) matching_matrix[:, gt_idx][pos_idx] = 1.0 del topk_ious, dynamic_ks, pos_idx prior_match_gt_mask = matching_matrix.sum(1) > 1 if prior_match_gt_mask.sum() > 0: cost_min, cost_argmin = torch.min( cost[prior_match_gt_mask, :], dim=1) matching_matrix[prior_match_gt_mask, :] *= 0.0 matching_matrix[prior_match_gt_mask, cost_argmin] = 1.0 # get foreground mask inside box and center prior fg_mask_inboxes = matching_matrix.sum(1) > 0.0 valid_mask[valid_mask.clone()] = fg_mask_inboxes matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) matched_pred_ious = (matching_matrix * pairwise_ious).sum(1)[fg_mask_inboxes] return matched_pred_ious, matched_gt_inds
11,381
43.635294
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/atss_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class ATSSAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `0` or a positive integer indicating the ground truth index. - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: topk (float): number of bbox selected in each level """ def __init__(self, topk, iou_calculator=dict(type='BboxOverlaps2D'), ignore_iof_thr=-1): self.topk = topk self.iou_calculator = build_iou_calculator(iou_calculator) self.ignore_iof_thr = ignore_iof_thr # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py def assign(self, bboxes, num_level_bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to bboxes. The assignment is done in following steps 1. compute iou between all bbox (bbox of all pyramid levels) and gt 2. compute center distance between all bbox and gt 3. on each pyramid level, for each gt, select k bbox whose center are closest to the gt center, so we total select k*l bbox as candidates for each gt 4. get corresponding iou for the these candidates, and compute the mean and std, set mean + std as the iou threshold 5. select these candidates whose iou are greater than or equal to the threshold as positive 6. limit the positive sample's center in gt Args: bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). num_level_bboxes (List): num of bboxes in each level gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. """ INF = 100000000 bboxes = bboxes[:, :4] num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0) # compute iou between all bbox and gt overlaps = self.iou_calculator(bboxes, gt_bboxes) # assign 0 by default assigned_gt_inds = overlaps.new_full((num_bboxes, ), 0, dtype=torch.long) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = overlaps.new_zeros((num_bboxes, )) if num_gt == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) # compute center distance between all bbox and gt gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 gt_points = torch.stack((gt_cx, gt_cy), dim=1) bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1) distances = (bboxes_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt() if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): ignore_overlaps = self.iou_calculator( bboxes, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr distances[ignore_idxs, :] = INF assigned_gt_inds[ignore_idxs] = -1 # Selecting candidates based on the center distance candidate_idxs = [] start_idx = 0 for level, bboxes_per_level in enumerate(num_level_bboxes): # on each pyramid level, for each gt, # select k bbox whose center are closest to the gt center end_idx = start_idx + bboxes_per_level distances_per_level = distances[start_idx:end_idx, :] selectable_k = min(self.topk, bboxes_per_level) _, topk_idxs_per_level = distances_per_level.topk( selectable_k, dim=0, largest=False) candidate_idxs.append(topk_idxs_per_level + start_idx) start_idx = end_idx candidate_idxs = torch.cat(candidate_idxs, dim=0) # get corresponding iou for the these candidates, and compute the # mean and std, set mean + std as the iou threshold candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] overlaps_mean_per_gt = candidate_overlaps.mean(0) overlaps_std_per_gt = candidate_overlaps.std(0) overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] # limit the positive sample's center in gt for gt_idx in range(num_gt): candidate_idxs[:, gt_idx] += gt_idx * num_bboxes ep_bboxes_cx = bboxes_cx.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) ep_bboxes_cy = bboxes_cy.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) candidate_idxs = candidate_idxs.view(-1) # calculate the left, top, right, bottom distance between positive # bbox center and gt side l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt) b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt) is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 is_pos = is_pos & is_in_gts # if an anchor box is assigned to multiple gts, # the one with the highest IoU will be selected. overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) index = candidate_idxs.view(-1)[is_pos.view(-1)] overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] overlaps_inf = overlaps_inf.view(num_gt, -1).t() max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) assigned_gt_inds[ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
7,809
42.388889
87
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/center_region_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner def scale_boxes(bboxes, scale): """Expand an array of boxes by a given scale. Args: bboxes (Tensor): Shape (m, 4) scale (float): The scale factor of bboxes Returns: (Tensor): Shape (m, 4). Scaled bboxes """ assert bboxes.size(1) == 4 w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5 h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5 x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5 y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5 w_half *= scale h_half *= scale boxes_scaled = torch.zeros_like(bboxes) boxes_scaled[:, 0] = x_c - w_half boxes_scaled[:, 2] = x_c + w_half boxes_scaled[:, 1] = y_c - h_half boxes_scaled[:, 3] = y_c + h_half return boxes_scaled def is_located_in(points, bboxes): """Are points located in bboxes. Args: points (Tensor): Points, shape: (m, 2). bboxes (Tensor): Bounding boxes, shape: (n, 4). Return: Tensor: Flags indicating if points are located in bboxes, shape: (m, n). """ assert points.size(1) == 2 assert bboxes.size(1) == 4 return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \ (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \ (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \ (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0)) def bboxes_area(bboxes): """Compute the area of an array of bboxes. Args: bboxes (Tensor): The coordinates ox bboxes. Shape: (m, 4) Returns: Tensor: Area of the bboxes. Shape: (m, ) """ assert bboxes.size(1) == 4 w = (bboxes[:, 2] - bboxes[:, 0]) h = (bboxes[:, 3] - bboxes[:, 1]) areas = w * h return areas @BBOX_ASSIGNERS.register_module() class CenterRegionAssigner(BaseAssigner): """Assign pixels at the center region of a bbox as positive. Each proposals will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: negative samples - semi-positive numbers: positive sample, index (0-based) of assigned gt Args: pos_scale (float): Threshold within which pixels are labelled as positive. neg_scale (float): Threshold above which pixels are labelled as positive. min_pos_iof (float): Minimum iof of a pixel with a gt to be labelled as positive. Default: 1e-2 ignore_gt_scale (float): Threshold within which the pixels are ignored when the gt is labelled as shadowed. Default: 0.5 foreground_dominate (bool): If True, the bbox will be assigned as positive when a gt's kernel region overlaps with another's shadowed (ignored) region, otherwise it is set as ignored. Default to False. """ def __init__(self, pos_scale, neg_scale, min_pos_iof=1e-2, ignore_gt_scale=0.5, foreground_dominate=False, iou_calculator=dict(type='BboxOverlaps2D')): self.pos_scale = pos_scale self.neg_scale = neg_scale self.min_pos_iof = min_pos_iof self.ignore_gt_scale = ignore_gt_scale self.foreground_dominate = foreground_dominate self.iou_calculator = build_iou_calculator(iou_calculator) def get_gt_priorities(self, gt_bboxes): """Get gt priorities according to their areas. Smaller gt has higher priority. Args: gt_bboxes (Tensor): Ground truth boxes, shape (k, 4). Returns: Tensor: The priority of gts so that gts with larger priority is \ more likely to be assigned. Shape (k, ) """ gt_areas = bboxes_area(gt_bboxes) # Rank all gt bbox areas. Smaller objects has larger priority _, sort_idx = gt_areas.sort(descending=True) sort_idx = sort_idx.argsort() return sort_idx def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to bboxes. This method assigns gts to every bbox (proposal/anchor), each bbox \ will be assigned with -1, or a semi-positive number. -1 means \ negative sample, semi-positive number is the index (0-based) of \ assigned gt. Args: bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,). Returns: :obj:`AssignResult`: The assigned result. Note that \ shadowed_labels of shape (N, 2) is also added as an \ `assign_result` attribute. `shadowed_labels` is a tensor \ composed of N pairs of anchor_ind, class_label], where N \ is the number of anchors that lie in the outer region of a \ gt, anchor_ind is the shadowed anchor index and class_label \ is the shadowed class label. Example: >>> self = CenterRegionAssigner(0.2, 0.2) >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]]) >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]]) >>> assign_result = self.assign(bboxes, gt_bboxes) >>> expected_gt_inds = torch.LongTensor([1, 0]) >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) """ # There are in total 5 steps in the pixel assignment # 1. Find core (the center region, say inner 0.2) # and shadow (the relatively ourter part, say inner 0.2-0.5) # regions of every gt. # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in # the image. # 3.1. For overlapping objects, the prior bboxes in gt_core is # assigned with the object with smallest area # 4. Assign prior bboxes with class label according to its gt id. # 4.1. Assign -1 to prior bboxes lying in shadowed gts # 4.2. Assign positive prior boxes with the corresponding label # 5. Find pixels lying in the shadow of an object and assign them with # background label, but set the loss weight of its corresponding # gt to zero. assert bboxes.size(1) == 4, 'bboxes must have size of 4' # 1. Find core positive and shadow region of every gt gt_core = scale_boxes(gt_bboxes, self.pos_scale) gt_shadow = scale_boxes(gt_bboxes, self.neg_scale) # 2. Find prior bboxes that lie in gt_core and gt_shadow regions bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2 # The center points lie within the gt boxes is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes) # Only calculate bbox and gt_core IoF. This enables small prior bboxes # to match large gts bbox_and_gt_core_overlaps = self.iou_calculator( bboxes, gt_core, mode='iof') # The center point of effective priors should be within the gt box is_bbox_in_gt_core = is_bbox_in_gt & ( bbox_and_gt_core_overlaps > self.min_pos_iof) # shape (n, k) is_bbox_in_gt_shadow = ( self.iou_calculator(bboxes, gt_shadow, mode='iof') > self.min_pos_iof) # Rule out center effective positive pixels is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core) num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0) if num_gts == 0 or num_bboxes == 0: # If no gts exist, assign all pixels to negative assigned_gt_ids = \ is_bbox_in_gt_core.new_zeros((num_bboxes,), dtype=torch.long) pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2)) else: # Step 3: assign a one-hot gt id to each pixel, and smaller objects # have high priority to assign the pixel. sort_idx = self.get_gt_priorities(gt_bboxes) assigned_gt_ids, pixels_in_gt_shadow = \ self.assign_one_hot_gt_indices(is_bbox_in_gt_core, is_bbox_in_gt_shadow, gt_priority=sort_idx) if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0: # No ground truth or boxes, return empty assignment gt_bboxes_ignore = scale_boxes( gt_bboxes_ignore, scale=self.ignore_gt_scale) is_bbox_in_ignored_gts = is_located_in(bbox_centers, gt_bboxes_ignore) is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1) assigned_gt_ids[is_bbox_in_ignored_gts] = -1 # 4. Assign prior bboxes with class label according to its gt id. assigned_labels = None shadowed_pixel_labels = None if gt_labels is not None: # Default assigned label is the background (-1) assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_ids > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds] - 1] # 5. Find pixels lying in the shadow of an object shadowed_pixel_labels = pixels_in_gt_shadow.clone() if pixels_in_gt_shadow.numel() > 0: pixel_idx, gt_idx =\ pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1] assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \ 'Some pixels are dually assigned to ignore and gt!' shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1] override = ( assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1]) if self.foreground_dominate: # When a pixel is both positive and shadowed, set it as pos shadowed_pixel_labels = shadowed_pixel_labels[~override] else: # When a pixel is both pos and shadowed, set it as shadowed assigned_labels[pixel_idx[override]] = -1 assigned_gt_ids[pixel_idx[override]] = 0 assign_result = AssignResult( num_gts, assigned_gt_ids, None, labels=assigned_labels) # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2) assign_result.set_extra_property('shadowed_labels', shadowed_pixel_labels) return assign_result def assign_one_hot_gt_indices(self, is_bbox_in_gt_core, is_bbox_in_gt_shadow, gt_priority=None): """Assign only one gt index to each prior box. Gts with large gt_priority are more likely to be assigned. Args: is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox center is in the core area of a gt (e.g. 0-0.2). Shape: (num_prior, num_gt). is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox center is in the shadowed area of a gt (e.g. 0.2-0.5). Shape: (num_prior, num_gt). gt_priority (Tensor): Priorities of gts. The gt with a higher priority is more likely to be assigned to the bbox when the bbox match with multiple gts. Shape: (num_gt, ). Returns: tuple: Returns (assigned_gt_inds, shadowed_gt_inds). - assigned_gt_inds: The assigned gt index of each prior bbox \ (i.e. index from 1 to num_gts). Shape: (num_prior, ). - shadowed_gt_inds: shadowed gt indices. It is a tensor of \ shape (num_ignore, 2) with first column being the \ shadowed prior bbox indices and the second column the \ shadowed gt indices (1-based). """ num_bboxes, num_gts = is_bbox_in_gt_core.shape if gt_priority is None: gt_priority = torch.arange( num_gts, device=is_bbox_in_gt_core.device) assert gt_priority.size(0) == num_gts # The bigger gt_priority, the more preferable to be assigned # The assigned inds are by default 0 (background) assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ), dtype=torch.long) # Shadowed bboxes are assigned to be background. But the corresponding # label is ignored during loss calculation, which is done through # shadowed_gt_inds shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False) if is_bbox_in_gt_core.sum() == 0: # No gt match shadowed_gt_inds[:, 1] += 1 # 1-based. For consistency issue return assigned_gt_inds, shadowed_gt_inds # The priority of each prior box and gt pair. If one prior box is # matched bo multiple gts. Only the pair with the highest priority # is saved pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts), -1, dtype=torch.long) # Each bbox could match with multiple gts. # The following codes deal with this situation # Matched bboxes (to any gt). Shape: (num_pos_anchor, ) inds_of_match = torch.any(is_bbox_in_gt_core, dim=1) # The matched gt index of each positive bbox. Length >= num_pos_anchor # , since one bbox could match multiple gts matched_bbox_gt_inds = torch.nonzero( is_bbox_in_gt_core, as_tuple=False)[:, 1] # Assign priority to each bbox-gt pair. pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds] _, argmax_priority = pair_priority[inds_of_match].max(dim=1) assigned_gt_inds[inds_of_match] = argmax_priority + 1 # 1-based # Zero-out the assigned anchor box to filter the shadowed gt indices is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0 # Concat the shadowed indices due to overlapping with that out side of # effective scale. shape: (total_num_ignore, 2) shadowed_gt_inds = torch.cat( (shadowed_gt_inds, torch.nonzero( is_bbox_in_gt_core, as_tuple=False)), dim=0) # `is_bbox_in_gt_core` should be changed back to keep arguments intact. is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1 # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds` if shadowed_gt_inds.numel() > 0: shadowed_gt_inds[:, 1] += 1 return assigned_gt_inds, shadowed_gt_inds
15,477
44.928783
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/region_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core import anchor_inside_flags from ..builder import BBOX_ASSIGNERS from .assign_result import AssignResult from .base_assigner import BaseAssigner def calc_region(bbox, ratio, stride, featmap_size=None): """Calculate region of the box defined by the ratio, the ratio is from the center of the box to every edge.""" # project bbox on the feature f_bbox = bbox / stride x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2]) y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3]) x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2]) y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3]) if featmap_size is not None: x1 = x1.clamp(min=0, max=featmap_size[1]) y1 = y1.clamp(min=0, max=featmap_size[0]) x2 = x2.clamp(min=0, max=featmap_size[1]) y2 = y2.clamp(min=0, max=featmap_size[0]) return (x1, y1, x2, y2) def anchor_ctr_inside_region_flags(anchors, stride, region): """Get the flag indicate whether anchor centers are inside regions.""" x1, y1, x2, y2 = region f_anchors = anchors / stride x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5 y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5 flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2) return flags @BBOX_ASSIGNERS.register_module() class RegionAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: don't care - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: center_ratio: ratio of the region in the center of the bbox to define positive sample. ignore_ratio: ratio of the region to define ignore samples. """ def __init__(self, center_ratio=0.2, ignore_ratio=0.5): self.center_ratio = center_ratio self.ignore_ratio = ignore_ratio def assign(self, mlvl_anchors, mlvl_valid_flags, gt_bboxes, img_meta, featmap_sizes, anchor_scale, anchor_strides, gt_bboxes_ignore=None, gt_labels=None, allowed_border=0): """Assign gt to anchors. This method assign a gt bbox to every bbox (proposal/anchor), each bbox will be assigned with -1, 0, or a positive number. -1 means don't care, 0 means negative sample, positive number is the index (1-based) of assigned gt. The assignment is done in following steps, and the order matters. 1. Assign every anchor to 0 (negative) 2. (For each gt_bboxes) Compute ignore flags based on ignore_region then assign -1 to anchors w.r.t. ignore flags 3. (For each gt_bboxes) Compute pos flags based on center_region then assign gt_bboxes to anchors w.r.t. pos flags 4. (For each gt_bboxes) Compute ignore flags based on adjacent anchor level then assign -1 to anchors w.r.t. ignore flags 5. Assign anchor outside of image to -1 Args: mlvl_anchors (list[Tensor]): Multi level anchors. mlvl_valid_flags (list[Tensor]): Multi level valid flags. gt_bboxes (Tensor): Ground truth bboxes of image img_meta (dict): Meta info of image. featmap_sizes (list[Tensor]): Feature mapsize each level anchor_scale (int): Scale of the anchor. anchor_strides (list[int]): Stride of the anchor. gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). allowed_border (int, optional): The border to allow the valid anchor. Defaults to 0. Returns: :obj:`AssignResult`: The assign result. """ if gt_bboxes_ignore is not None: raise NotImplementedError num_gts = gt_bboxes.shape[0] num_bboxes = sum(x.shape[0] for x in mlvl_anchors) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = gt_bboxes.new_zeros((num_bboxes, )) assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ), dtype=torch.long) if gt_labels is None: assigned_labels = None else: assigned_labels = gt_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) num_lvls = len(mlvl_anchors) r1 = (1 - self.center_ratio) / 2 r2 = (1 - self.ignore_ratio) / 2 scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() # 1. assign 0 (negative) by default mlvl_assigned_gt_inds = [] mlvl_ignore_flags = [] for lvl in range(num_lvls): h, w = featmap_sizes[lvl] assert h * w == mlvl_anchors[lvl].shape[0] assigned_gt_inds = gt_bboxes.new_full((h * w, ), 0, dtype=torch.long) ignore_flags = torch.zeros_like(assigned_gt_inds) mlvl_assigned_gt_inds.append(assigned_gt_inds) mlvl_ignore_flags.append(ignore_flags) for gt_id in range(num_gts): lvl = target_lvls[gt_id].item() featmap_size = featmap_sizes[lvl] stride = anchor_strides[lvl] anchors = mlvl_anchors[lvl] gt_bbox = gt_bboxes[gt_id, :4] # Compute regions ignore_region = calc_region(gt_bbox, r2, stride, featmap_size) ctr_region = calc_region(gt_bbox, r1, stride, featmap_size) # 2. Assign -1 to ignore flags ignore_flags = anchor_ctr_inside_region_flags( anchors, stride, ignore_region) mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 # 3. Assign gt_bboxes to pos flags pos_flags = anchor_ctr_inside_region_flags(anchors, stride, ctr_region) mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1 # 4. Assign -1 to ignore adjacent lvl if lvl > 0: d_lvl = lvl - 1 d_anchors = mlvl_anchors[d_lvl] d_featmap_size = featmap_sizes[d_lvl] d_stride = anchor_strides[d_lvl] d_ignore_region = calc_region(gt_bbox, r2, d_stride, d_featmap_size) ignore_flags = anchor_ctr_inside_region_flags( d_anchors, d_stride, d_ignore_region) mlvl_ignore_flags[d_lvl][ignore_flags] = 1 if lvl < num_lvls - 1: u_lvl = lvl + 1 u_anchors = mlvl_anchors[u_lvl] u_featmap_size = featmap_sizes[u_lvl] u_stride = anchor_strides[u_lvl] u_ignore_region = calc_region(gt_bbox, r2, u_stride, u_featmap_size) ignore_flags = anchor_ctr_inside_region_flags( u_anchors, u_stride, u_ignore_region) mlvl_ignore_flags[u_lvl][ignore_flags] = 1 # 4. (cont.) Assign -1 to ignore adjacent lvl for lvl in range(num_lvls): ignore_flags = mlvl_ignore_flags[lvl] mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 # 5. Assign -1 to anchor outside of image flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds) flat_anchors = torch.cat(mlvl_anchors) flat_valid_flags = torch.cat(mlvl_valid_flags) assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] == flat_valid_flags.shape[0]) inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags, img_meta['img_shape'], allowed_border) outside_flags = ~inside_flags flat_assigned_gt_inds[outside_flags] = -1 if gt_labels is not None: assigned_labels = torch.zeros_like(flat_assigned_gt_inds) pos_flags = assigned_gt_inds > 0 assigned_labels[pos_flags] = gt_labels[ flat_assigned_gt_inds[pos_flags] - 1] else: assigned_labels = None return AssignResult( num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)
9,473
41.484305
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/grid_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class GridAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: don't care - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. """ def __init__(self, pos_iou_thr, neg_iou_thr, min_pos_iou=.0, gt_max_assign_all=True, iou_calculator=dict(type='BboxOverlaps2D')): self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.iou_calculator = build_iou_calculator(iou_calculator) def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None): """Assign gt to bboxes. The process is very much like the max iou assigner, except that positive samples are constrained within the cell that the gt boxes fell in. This method assign a gt bbox to every bbox (proposal/anchor), each bbox will be assigned with -1, 0, or a positive number. -1 means don't care, 0 means negative sample, positive number is the index (1-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to -1 2. assign proposals whose iou with all gts <= neg_iou_thr to 0 3. for each bbox within a cell, if the iou with its nearest gt > pos_iou_thr and the center of that gt falls inside the cell, assign it to that bbox 4. for each gt bbox, assign its nearest proposals within the cell the gt bbox falls in to itself. Args: bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). box_responsible_flags (Tensor): flag to indicate whether box is responsible for prediction, shape(n, ) gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. """ num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0) # compute iou between all gt and bboxes overlaps = self.iou_calculator(gt_bboxes, bboxes) # 1. assign -1 by default assigned_gt_inds = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = overlaps.new_zeros((num_bboxes, )) if num_gts == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) # 2. assign negative: below # for each anchor, which gt best overlaps with it # for each anchor, the max iou of all gts # shape of max_overlaps == argmax_overlaps == num_bboxes max_overlaps, argmax_overlaps = overlaps.max(dim=0) if isinstance(self.neg_iou_thr, float): assigned_gt_inds[(max_overlaps >= 0) & (max_overlaps <= self.neg_iou_thr)] = 0 elif isinstance(self.neg_iou_thr, (tuple, list)): assert len(self.neg_iou_thr) == 2 assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0]) & (max_overlaps <= self.neg_iou_thr[1])] = 0 # 3. assign positive: falls into responsible cell and above # positive IOU threshold, the order matters. # the prior condition of comparison is to filter out all # unrelated anchors, i.e. not box_responsible_flags overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1. # calculate max_overlaps again, but this time we only consider IOUs # for anchors responsible for prediction max_overlaps, argmax_overlaps = overlaps.max(dim=0) # for each gt, which anchor best overlaps with it # for each gt, the max iou of all proposals # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) pos_inds = (max_overlaps > self.pos_iou_thr) & box_responsible_flags.type(torch.bool) assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 # 4. assign positive to max overlapped anchors within responsible cell for i in range(num_gts): if gt_max_overlaps[i] > self.min_pos_iou: if self.gt_max_assign_all: max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \ box_responsible_flags.type(torch.bool) assigned_gt_inds[max_iou_inds] = i + 1 elif box_responsible_flags[gt_argmax_overlaps[i]]: assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 # assign labels of positive anchors if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
6,863
42.719745
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/hungarian_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..match_costs import build_match_cost from ..transforms import bbox_cxcywh_to_xyxy from .assign_result import AssignResult from .base_assigner import BaseAssigner try: from scipy.optimize import linear_sum_assignment except ImportError: linear_sum_assignment = None @BBOX_ASSIGNERS.register_module() class HungarianAssigner(BaseAssigner): """Computes one-to-one matching between predictions and ground truth. This class computes an assignment between the targets and the predictions based on the costs. The costs are weighted sum of three components: classification cost, regression L1 cost and regression iou cost. The targets don't include the no_object, so generally there are more predictions than targets. After the one-to-one matching, the un-matched are treated as backgrounds. Thus each query prediction will be assigned with `0` or a positive integer indicating the ground truth index: - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: cls_weight (int | float, optional): The scale factor for classification cost. Default 1.0. bbox_weight (int | float, optional): The scale factor for regression L1 cost. Default 1.0. iou_weight (int | float, optional): The scale factor for regression iou cost. Default 1.0. iou_calculator (dict | optional): The config for the iou calculation. Default type `BboxOverlaps2D`. iou_mode (str | optional): "iou" (intersection over union), "iof" (intersection over foreground), or "giou" (generalized intersection over union). Default "giou". """ def __init__(self, cls_cost=dict(type='ClassificationCost', weight=1.), reg_cost=dict(type='BBoxL1Cost', weight=1.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)): self.cls_cost = build_match_cost(cls_cost) self.reg_cost = build_match_cost(reg_cost) self.iou_cost = build_match_cost(iou_cost) def assign(self, bbox_pred, cls_pred, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore=None, eps=1e-7): """Computes one-to-one matching based on the weighted costs. This method assign each query prediction to a ground truth or background. The `assigned_gt_inds` with -1 means don't care, 0 means negative sample, and positive number is the index (1-based) of assigned gt. The assignment is done in the following steps, the order matters. 1. assign every prediction to -1 2. compute the weighted costs 3. do Hungarian matching on CPU based on the costs 4. assign all to 0 (background) first, then for each matched pair between predictions and gts, treat this prediction as foreground and assign the corresponding gt index (plus 1) to it. Args: bbox_pred (Tensor): Predicted boxes with normalized coordinates (cx, cy, w, h), which are all in range [0, 1]. Shape [num_query, 4]. cls_pred (Tensor): Predicted classification logits, shape [num_query, num_class]. gt_bboxes (Tensor): Ground truth boxes with unnormalized coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). img_meta (dict): Meta information for current image. gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`. Default None. eps (int | float, optional): A value added to the denominator for numerical stability. Default 1e-7. Returns: :obj:`AssignResult`: The assigned result. """ assert gt_bboxes_ignore is None, \ 'Only case when gt_bboxes_ignore is None is supported.' num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) # 1. assign -1 by default assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), -1, dtype=torch.long) assigned_labels = bbox_pred.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment if num_gts == 0: # No ground truth, assign all to background assigned_gt_inds[:] = 0 return AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels) img_h, img_w, _ = img_meta['img_shape'] factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) # 2. compute the weighted costs # classification and bboxcost. cls_cost = self.cls_cost(cls_pred, gt_labels) # regression L1 cost normalize_gt_bboxes = gt_bboxes / factor reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes) # regression iou cost, defaultly giou is used in official DETR. bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor iou_cost = self.iou_cost(bboxes, gt_bboxes) # weighted sum of above three costs cost = cls_cost + reg_cost + iou_cost # 3. do Hungarian matching on CPU using linear_sum_assignment cost = cost.detach().cpu() if linear_sum_assignment is None: raise ImportError('Please run "pip install scipy" ' 'to install scipy first.') matched_row_inds, matched_col_inds = linear_sum_assignment(cost) matched_row_inds = torch.from_numpy(matched_row_inds).to( bbox_pred.device) matched_col_inds = torch.from_numpy(matched_col_inds).to( bbox_pred.device) # 4. assign backgrounds and foregrounds # assign all indices to backgrounds first assigned_gt_inds[:] = 0 # assign foregrounds based on matching results assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] return AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels)
6,665
44.346939
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/task_aligned_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner INF = 100000000 @BBOX_ASSIGNERS.register_module() class TaskAlignedAssigner(BaseAssigner): """Task aligned assigner used in the paper: `TOOD: Task-aligned One-stage Object Detection. <https://arxiv.org/abs/2108.07755>`_. Assign a corresponding gt bbox or background to each predicted bbox. Each bbox will be assigned with `0` or a positive integer indicating the ground truth index. - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: topk (int): number of bbox selected in each level iou_calculator (dict): Config dict for iou calculator. Default: dict(type='BboxOverlaps2D') """ def __init__(self, topk, iou_calculator=dict(type='BboxOverlaps2D')): assert topk >= 1 self.topk = topk self.iou_calculator = build_iou_calculator(iou_calculator) def assign(self, pred_scores, decode_bboxes, anchors, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None, alpha=1, beta=6): """Assign gt to bboxes. The assignment is done in following steps 1. compute alignment metric between all bbox (bbox of all pyramid levels) and gt 2. select top-k bbox as candidates for each gt 3. limit the positive sample's center in gt (because the anchor-free detector only can predict positive distance) Args: pred_scores (Tensor): predicted class probability, shape(n, num_classes) decode_bboxes (Tensor): predicted bounding boxes, shape(n, 4) anchors (Tensor): pre-defined anchors, shape(n, 4). gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`TaskAlignedAssignResult`: The assign result. """ anchors = anchors[:, :4] num_gt, num_bboxes = gt_bboxes.size(0), anchors.size(0) # compute alignment metric between all bbox and gt overlaps = self.iou_calculator(decode_bboxes, gt_bboxes).detach() bbox_scores = pred_scores[:, gt_labels].detach() # assign 0 by default assigned_gt_inds = anchors.new_full((num_bboxes, ), 0, dtype=torch.long) assign_metrics = anchors.new_zeros((num_bboxes, )) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = anchors.new_zeros((num_bboxes, )) if num_gt == 0: # No gt boxes, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = anchors.new_full((num_bboxes, ), -1, dtype=torch.long) assign_result = AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) assign_result.assign_metrics = assign_metrics return assign_result # select top-k bboxes as candidates for each gt alignment_metrics = bbox_scores**alpha * overlaps**beta topk = min(self.topk, alignment_metrics.size(0)) _, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True) candidate_metrics = alignment_metrics[candidate_idxs, torch.arange(num_gt)] is_pos = candidate_metrics > 0 # limit the positive sample's center in gt anchors_cx = (anchors[:, 0] + anchors[:, 2]) / 2.0 anchors_cy = (anchors[:, 1] + anchors[:, 3]) / 2.0 for gt_idx in range(num_gt): candidate_idxs[:, gt_idx] += gt_idx * num_bboxes ep_anchors_cx = anchors_cx.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) ep_anchors_cy = anchors_cy.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) candidate_idxs = candidate_idxs.view(-1) # calculate the left, top, right, bottom distance between positive # bbox center and gt side l_ = ep_anchors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] t_ = ep_anchors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - ep_anchors_cx[candidate_idxs].view(-1, num_gt) b_ = gt_bboxes[:, 3] - ep_anchors_cy[candidate_idxs].view(-1, num_gt) is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 is_pos = is_pos & is_in_gts # if an anchor box is assigned to multiple gts, # the one with the highest iou will be selected. overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) index = candidate_idxs.view(-1)[is_pos.view(-1)] overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] overlaps_inf = overlaps_inf.view(num_gt, -1).t() max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) assigned_gt_inds[ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 assign_metrics[max_overlaps != -INF] = alignment_metrics[ max_overlaps != -INF, argmax_overlaps[max_overlaps != -INF]] if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None assign_result = AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) assign_result.assign_metrics = assign_metrics return assign_result
6,554
42.125
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/base_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod class BaseAssigner(metaclass=ABCMeta): """Base assigner that assigns boxes to ground truth boxes.""" @abstractmethod def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign boxes to either a ground truth boxes or a negative boxes."""
375
33.181818
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/uniform_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from ..transforms import bbox_xyxy_to_cxcywh from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class UniformAssigner(BaseAssigner): """Uniform Matching between the anchors and gt boxes, which can achieve balance in positive anchors, and gt_bboxes_ignore was not considered for now. Args: pos_ignore_thr (float): the threshold to ignore positive anchors neg_ignore_thr (float): the threshold to ignore negative anchors match_times(int): Number of positive anchors for each gt box. Default 4. iou_calculator (dict): iou_calculator config """ def __init__(self, pos_ignore_thr, neg_ignore_thr, match_times=4, iou_calculator=dict(type='BboxOverlaps2D')): self.match_times = match_times self.pos_ignore_thr = pos_ignore_thr self.neg_ignore_thr = neg_ignore_thr self.iou_calculator = build_iou_calculator(iou_calculator) def assign(self, bbox_pred, anchor, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) # 1. assign -1 by default assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), 0, dtype=torch.long) assigned_labels = bbox_pred.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment if num_gts == 0: # No ground truth, assign all to background assigned_gt_inds[:] = 0 assign_result = AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels) assign_result.set_extra_property( 'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool)) assign_result.set_extra_property('pos_predicted_boxes', bbox_pred.new_empty((0, 4))) assign_result.set_extra_property('target_boxes', bbox_pred.new_empty((0, 4))) return assign_result # 2. Compute the L1 cost between boxes # Note that we use anchors and predict boxes both cost_bbox = torch.cdist( bbox_xyxy_to_cxcywh(bbox_pred), bbox_xyxy_to_cxcywh(gt_bboxes), p=1) cost_bbox_anchors = torch.cdist( bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1) # We found that topk function has different results in cpu and # cuda mode. In order to ensure consistency with the source code, # we also use cpu mode. # TODO: Check whether the performance of cpu and cuda are the same. C = cost_bbox.cpu() C1 = cost_bbox_anchors.cpu() # self.match_times x n index = torch.topk( C, # c=b,n,x c[i]=n,x k=self.match_times, dim=0, largest=False)[1] # self.match_times x n index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1] # (self.match_times*2) x n indexes = torch.cat((index, index1), dim=1).reshape(-1).to(bbox_pred.device) pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes) anchor_overlaps = self.iou_calculator(anchor, gt_bboxes) pred_max_overlaps, _ = pred_overlaps.max(dim=1) anchor_max_overlaps, _ = anchor_overlaps.max(dim=0) # 3. Compute the ignore indexes use gt_bboxes and predict boxes ignore_idx = pred_max_overlaps > self.neg_ignore_thr assigned_gt_inds[ignore_idx] = -1 # 4. Compute the ignore indexes of positive sample use anchors # and predict boxes pos_gt_index = torch.arange( 0, C1.size(1), device=bbox_pred.device).repeat(self.match_times * 2) pos_ious = anchor_overlaps[indexes, pos_gt_index] pos_ignore_idx = pos_ious < self.pos_ignore_thr pos_gt_index_with_ignore = pos_gt_index + 1 pos_gt_index_with_ignore[pos_ignore_idx] = -1 assigned_gt_inds[indexes] = pos_gt_index_with_ignore if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None assign_result = AssignResult( num_gts, assigned_gt_inds, anchor_max_overlaps, labels=assigned_labels) assign_result.set_extra_property('pos_idx', ~pos_ignore_idx) assign_result.set_extra_property('pos_predicted_boxes', bbox_pred[indexes]) assign_result.set_extra_property('target_boxes', gt_bboxes[pos_gt_index]) return assign_result
5,556
39.860294
77
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/point_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class PointAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each point. Each proposals will be assigned with `0`, or a positive integer indicating the ground truth index. - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt """ def __init__(self, scale=4, pos_num=3): self.scale = scale self.pos_num = pos_num def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to points. This method assign a gt bbox to every points set, each points set will be assigned with the background_label (-1), or a label number. -1 is background, and semi-positive number is the index (0-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every points to the background_label (-1) 2. A point is assigned to some gt bbox if (i) the point is within the k closest points to the gt bbox (ii) the distance between this point and the gt is smaller than other gt bboxes Args: points (Tensor): points to be assigned, shape(n, 3) while last dimension stands for (x, y, stride). gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. NOTE: currently unused. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. """ num_points = points.shape[0] num_gts = gt_bboxes.shape[0] if num_gts == 0 or num_points == 0: # If no truth assign everything to the background assigned_gt_inds = points.new_full((num_points, ), 0, dtype=torch.long) if gt_labels is None: assigned_labels = None else: assigned_labels = points.new_full((num_points, ), -1, dtype=torch.long) return AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels) points_xy = points[:, :2] points_stride = points[:, 2] points_lvl = torch.log2( points_stride).int() # [3...,4...,5...,6...,7...] lvl_min, lvl_max = points_lvl.min(), points_lvl.max() # assign gt box gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2 gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6) scale = self.scale gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) + torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int() gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max) # stores the assigned gt index of each point assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long) # stores the assigned gt dist (to this point) of each point assigned_gt_dist = points.new_full((num_points, ), float('inf')) points_range = torch.arange(points.shape[0]) for idx in range(num_gts): gt_lvl = gt_bboxes_lvl[idx] # get the index of points in this level lvl_idx = gt_lvl == points_lvl points_index = points_range[lvl_idx] # get the points in this level lvl_points = points_xy[lvl_idx, :] # get the center point of gt gt_point = gt_bboxes_xy[[idx], :] # get width and height of gt gt_wh = gt_bboxes_wh[[idx], :] # compute the distance between gt center and # all points in this level points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1) # find the nearest k points to gt center in this level min_dist, min_dist_index = torch.topk( points_gt_dist, self.pos_num, largest=False) # the index of nearest k points to gt center in this level min_dist_points_index = points_index[min_dist_index] # The less_than_recorded_index stores the index # of min_dist that is less then the assigned_gt_dist. Where # assigned_gt_dist stores the dist from previous assigned gt # (if exist) to each point. less_than_recorded_index = min_dist < assigned_gt_dist[ min_dist_points_index] # The min_dist_points_index stores the index of points satisfy: # (1) it is k nearest to current gt center in this level. # (2) it is closer to current gt center than other gt center. min_dist_points_index = min_dist_points_index[ less_than_recorded_index] # assign the result assigned_gt_inds[min_dist_points_index] = idx + 1 assigned_gt_dist[min_dist_points_index] = min_dist[ less_than_recorded_index] if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_points, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels)
5,995
43.414815
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .approx_max_iou_assigner import ApproxMaxIoUAssigner from .assign_result import AssignResult from .atss_assigner import ATSSAssigner from .base_assigner import BaseAssigner from .center_region_assigner import CenterRegionAssigner from .grid_assigner import GridAssigner from .hungarian_assigner import HungarianAssigner from .max_iou_assigner import MaxIoUAssigner from .point_assigner import PointAssigner from .region_assigner import RegionAssigner from .sim_ota_assigner import SimOTAAssigner from .task_aligned_assigner import TaskAlignedAssigner from .uniform_assigner import UniformAssigner __all__ = [ 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult', 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner', 'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner', 'TaskAlignedAssigner' ]
926
41.136364
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .max_iou_assigner import MaxIoUAssigner @BBOX_ASSIGNERS.register_module() class ApproxMaxIoUAssigner(MaxIoUAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with an integer indicating the ground truth index. (semi-positive index: gt label (0-based), -1: background) - -1: negative sample, no assigned gt - semi-positive integer: positive sample, index (0-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. ignore_wrt_candidates (bool): Whether to compute the iof between `bboxes` and `gt_bboxes_ignore`, or the contrary. match_low_quality (bool): Whether to allow quality matches. This is usually allowed for RPN and single stage detectors, but not allowed in the second stage. gpu_assign_thr (int): The upper bound of the number of GT for GPU assign. When the number of gt is above this threshold, will assign on CPU device. Negative values mean not assign on CPU. """ def __init__(self, pos_iou_thr, neg_iou_thr, min_pos_iou=.0, gt_max_assign_all=True, ignore_iof_thr=-1, ignore_wrt_candidates=True, match_low_quality=True, gpu_assign_thr=-1, iou_calculator=dict(type='BboxOverlaps2D')): self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.ignore_iof_thr = ignore_iof_thr self.ignore_wrt_candidates = ignore_wrt_candidates self.gpu_assign_thr = gpu_assign_thr self.match_low_quality = match_low_quality self.iou_calculator = build_iou_calculator(iou_calculator) def assign(self, approxs, squares, approxs_per_octave, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to approxs. This method assign a gt bbox to each group of approxs (bboxes), each group of approxs is represent by a base approx (bbox) and will be assigned with -1, or a semi-positive number. background_label (-1) means negative sample, semi-positive number is the index (0-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to background_label (-1) 2. use the max IoU of each group of approxs to assign 2. assign proposals whose iou with all gts < neg_iou_thr to background 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, assign it to that bbox 4. for each gt bbox, assign its nearest proposals (may be more than one) to itself Args: approxs (Tensor): Bounding boxes to be assigned, shape(approxs_per_octave*n, 4). squares (Tensor): Base Bounding boxes to be assigned, shape(n, 4). approxs_per_octave (int): number of approxs per octave gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. """ num_squares = squares.size(0) num_gts = gt_bboxes.size(0) if num_squares == 0 or num_gts == 0: # No predictions and/or truth, return empty assignment overlaps = approxs.new(num_gts, num_squares) assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) return assign_result # re-organize anchors by approxs_per_octave x num_squares approxs = torch.transpose( approxs.view(num_squares, approxs_per_octave, 4), 0, 1).contiguous().view(-1, 4) assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( num_gts > self.gpu_assign_thr) else False # compute overlap and assign gt on CPU when number of GT is large if assign_on_cpu: device = approxs.device approxs = approxs.cpu() gt_bboxes = gt_bboxes.cpu() if gt_bboxes_ignore is not None: gt_bboxes_ignore = gt_bboxes_ignore.cpu() if gt_labels is not None: gt_labels = gt_labels.cpu() all_overlaps = self.iou_calculator(approxs, gt_bboxes) overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares, num_gts).max(dim=0) overlaps = torch.transpose(overlaps, 0, 1) if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0): if self.ignore_wrt_candidates: ignore_overlaps = self.iou_calculator( squares, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) else: ignore_overlaps = self.iou_calculator( gt_bboxes_ignore, squares, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) if assign_on_cpu: assign_result.gt_inds = assign_result.gt_inds.to(device) assign_result.max_overlaps = assign_result.max_overlaps.to(device) if assign_result.labels is not None: assign_result.labels = assign_result.labels.to(device) return assign_result
6,697
44.564626
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/assigners/max_iou_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class MaxIoUAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, or a semi-positive integer indicating the ground truth index. - -1: negative sample, no assigned gt - semi-positive integer: positive sample, index (0-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. ignore_wrt_candidates (bool): Whether to compute the iof between `bboxes` and `gt_bboxes_ignore`, or the contrary. match_low_quality (bool): Whether to allow low quality matches. This is usually allowed for RPN and single stage detectors, but not allowed in the second stage. Details are demonstrated in Step 4. gpu_assign_thr (int): The upper bound of the number of GT for GPU assign. When the number of gt is above this threshold, will assign on CPU device. Negative values mean not assign on CPU. """ def __init__(self, pos_iou_thr, neg_iou_thr, min_pos_iou=.0, gt_max_assign_all=True, ignore_iof_thr=-1, ignore_wrt_candidates=True, match_low_quality=True, gpu_assign_thr=-1, iou_calculator=dict(type='BboxOverlaps2D')): self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.ignore_iof_thr = ignore_iof_thr self.ignore_wrt_candidates = ignore_wrt_candidates self.gpu_assign_thr = gpu_assign_thr self.match_low_quality = match_low_quality self.iou_calculator = build_iou_calculator(iou_calculator) def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to bboxes. This method assign a gt bbox to every bbox (proposal/anchor), each bbox will be assigned with -1, or a semi-positive number. -1 means negative sample, semi-positive number is the index (0-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to the background 2. assign proposals whose iou with all gts < neg_iou_thr to 0 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, assign it to that bbox 4. for each gt bbox, assign its nearest proposals (may be more than one) to itself Args: bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. Example: >>> self = MaxIoUAssigner(0.5, 0.5) >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]]) >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]]) >>> assign_result = self.assign(bboxes, gt_bboxes) >>> expected_gt_inds = torch.LongTensor([1, 0]) >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) """ assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( gt_bboxes.shape[0] > self.gpu_assign_thr) else False # compute overlap and assign gt on CPU when number of GT is large if assign_on_cpu: device = bboxes.device bboxes = bboxes.cpu() gt_bboxes = gt_bboxes.cpu() if gt_bboxes_ignore is not None: gt_bboxes_ignore = gt_bboxes_ignore.cpu() if gt_labels is not None: gt_labels = gt_labels.cpu() overlaps = self.iou_calculator(gt_bboxes, bboxes) if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): if self.ignore_wrt_candidates: ignore_overlaps = self.iou_calculator( bboxes, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) else: ignore_overlaps = self.iou_calculator( gt_bboxes_ignore, bboxes, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) if assign_on_cpu: assign_result.gt_inds = assign_result.gt_inds.to(device) assign_result.max_overlaps = assign_result.max_overlaps.to(device) if assign_result.labels is not None: assign_result.labels = assign_result.labels.to(device) return assign_result def assign_wrt_overlaps(self, overlaps, gt_labels=None): """Assign w.r.t. the overlaps of bboxes with gts. Args: overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes, shape(k, n). gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. """ num_gts, num_bboxes = overlaps.size(0), overlaps.size(1) # 1. assign -1 by default assigned_gt_inds = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = overlaps.new_zeros((num_bboxes, )) if num_gts == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) # for each anchor, which gt best overlaps with it # for each anchor, the max iou of all gts max_overlaps, argmax_overlaps = overlaps.max(dim=0) # for each gt, which anchor best overlaps with it # for each gt, the max iou of all proposals gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) # 2. assign negative: below # the negative inds are set to be 0 if isinstance(self.neg_iou_thr, float): assigned_gt_inds[(max_overlaps >= 0) & (max_overlaps < self.neg_iou_thr)] = 0 elif isinstance(self.neg_iou_thr, tuple): assert len(self.neg_iou_thr) == 2 assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0]) & (max_overlaps < self.neg_iou_thr[1])] = 0 # 3. assign positive: above positive IoU threshold pos_inds = max_overlaps >= self.pos_iou_thr assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 if self.match_low_quality: # Low-quality matching will overwrite the assigned_gt_inds assigned # in Step 3. Thus, the assigned gt might not be the best one for # prediction. # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2, # bbox 1 will be assigned as the best target for bbox A in step 3. # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's # assigned_gt_inds will be overwritten to be bbox B. # This might be the reason that it is not used in ROI Heads. for i in range(num_gts): if gt_max_overlaps[i] >= self.min_pos_iou: if self.gt_max_assign_all: max_iou_inds = overlaps[i, :] == gt_max_overlaps[i] assigned_gt_inds[max_iou_inds] = i + 1 else: assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
9,798
44.78972
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/match_costs/match_cost.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core.bbox.iou_calculators import bbox_overlaps from mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh from .builder import MATCH_COST @MATCH_COST.register_module() class BBoxL1Cost: """BBoxL1Cost. Args: weight (int | float, optional): loss_weight box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN Examples: >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost >>> import torch >>> self = BBoxL1Cost() >>> bbox_pred = torch.rand(1, 4) >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(bbox_pred, gt_bboxes, factor) tensor([[1.6172, 1.6422]]) """ def __init__(self, weight=1., box_format='xyxy'): self.weight = weight assert box_format in ['xyxy', 'xywh'] self.box_format = box_format def __call__(self, bbox_pred, gt_bboxes): """ Args: bbox_pred (Tensor): Predicted boxes with normalized coordinates (cx, cy, w, h), which are all in range [0, 1]. Shape [num_query, 4]. gt_bboxes (Tensor): Ground truth boxes with normalized coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. Returns: torch.Tensor: bbox_cost value with weight """ if self.box_format == 'xywh': gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes) elif self.box_format == 'xyxy': bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred) bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1) return bbox_cost * self.weight @MATCH_COST.register_module() class FocalLossCost: """FocalLossCost. Args: weight (int | float, optional): loss_weight alpha (int | float, optional): focal_loss alpha gamma (int | float, optional): focal_loss gamma eps (float, optional): default 1e-12 Examples: >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost >>> import torch >>> self = FocalLossCost() >>> cls_pred = torch.rand(4, 3) >>> gt_labels = torch.tensor([0, 1, 2]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(cls_pred, gt_labels) tensor([[-0.3236, -0.3364, -0.2699], [-0.3439, -0.3209, -0.4807], [-0.4099, -0.3795, -0.2929], [-0.1950, -0.1207, -0.2626]]) """ def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12): self.weight = weight self.alpha = alpha self.gamma = gamma self.eps = eps def __call__(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classification logits, shape [num_query, num_class]. gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). Returns: torch.Tensor: cls_cost value with weight """ cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] return cls_cost * self.weight @MATCH_COST.register_module() class ClassificationCost: """ClsSoftmaxCost. Args: weight (int | float, optional): loss_weight Examples: >>> from mmdet.core.bbox.match_costs.match_cost import \ ... ClassificationCost >>> import torch >>> self = ClassificationCost() >>> cls_pred = torch.rand(4, 3) >>> gt_labels = torch.tensor([0, 1, 2]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(cls_pred, gt_labels) tensor([[-0.3430, -0.3525, -0.3045], [-0.3077, -0.2931, -0.3992], [-0.3664, -0.3455, -0.2881], [-0.3343, -0.2701, -0.3956]]) """ def __init__(self, weight=1.): self.weight = weight def __call__(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classification logits, shape [num_query, num_class]. gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). Returns: torch.Tensor: cls_cost value with weight """ # Following the official DETR repo, contrary to the loss that # NLL is used, we approximate it in 1 - cls_score[gt_label]. # The 1 is a constant that doesn't change the matching, # so it can be omitted. cls_score = cls_pred.softmax(-1) cls_cost = -cls_score[:, gt_labels] return cls_cost * self.weight @MATCH_COST.register_module() class IoUCost: """IoUCost. Args: iou_mode (str, optional): iou mode such as 'iou' | 'giou' weight (int | float, optional): loss weight Examples: >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost >>> import torch >>> self = IoUCost() >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]]) >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) >>> self(bboxes, gt_bboxes) tensor([[-0.1250, 0.1667], [ 0.1667, -0.5000]]) """ def __init__(self, iou_mode='giou', weight=1.): self.weight = weight self.iou_mode = iou_mode def __call__(self, bboxes, gt_bboxes): """ Args: bboxes (Tensor): Predicted boxes with unnormalized coordinates (x1, y1, x2, y2). Shape [num_query, 4]. gt_bboxes (Tensor): Ground truth boxes with unnormalized coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. Returns: torch.Tensor: iou_cost value with weight """ # overlaps: [num_bboxes, num_gt] overlaps = bbox_overlaps( bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False) # The 1 is a constant that doesn't change the matching, so omitted. iou_cost = -overlaps return iou_cost * self.weight
6,342
33.102151
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/core/bbox/match_costs/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .builder import build_match_cost from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost __all__ = [ 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', 'FocalLossCost' ]
271
29.222222
78
py