Dataset columns:
repo: string (length 2 to 99)
file: string (length 13 to 225)
code: string (length 0 to 18.3M)
file_length: int64 (0 to 18.3M)
avg_line_length: float64 (0 to 1.36M)
max_line_length: int64 (0 to 4.26M)
extension_type: string (1 class)
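A minimal sketch, assuming the records below were exported to a local Parquet file (the path 'dsla_configs.parquet' is a placeholder, not taken from this dump), of how the table could be loaded and one config's source printed back as readable Python:

import pandas as pd

# Placeholder path: the actual export file name is not given in this dump.
df = pd.read_parquet('dsla_configs.parquet')

# Each row holds one config file plus simple size statistics.
print(df[['repo', 'file', 'file_length', 'max_line_length']].head())

# The 'code' column stores the full source of a config file, so printing it
# recovers the multi-line Python text.
print(df.loc[0, 'code'])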
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_2x.py',
    '../_base_/default_runtime.py'
]
177
28.666667
72
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py
_base_ = [
    '../common/mstrain_3x_coco.py',
    '../_base_/models/faster_rcnn_r50_fpn.py'
]
91
22
77
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py
_base_ = 'faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'

# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[60000, 80000])

# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)

checkpoint_config = dict(interval=10000)
evaluation = dict(interval=10000, metric='bbox')
380
22.8125
69
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]
177
28.666667
72
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]
model = dict(
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='soft_nms', iou_threshold=0.5),
            max_per_img=100)))
347
25.769231
72
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
421
27.133333
76
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
# fp16 settings
fp16 = dict(loss_scale=512.)
89
21.5
43
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
162
31.6
57
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
199
27.571429
61
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py
_base_ = [ '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' ] model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=8, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) # ResNeXt-101-32x8d model trained with Caffe2 at FB, # so the mean and std need to be changed. img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False) # In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], # multiscale_mode='range' train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Use RepeatDataset to speed up training data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,923
29.539683
77
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,410
32.595238
72
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
206
24.875
61
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_head=dict(
            reg_decoded_bbox=True,
            loss_bbox=dict(type='GIoULoss', loss_weight=10.0))))
201
27.857143
64
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,505
30.375
72
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py
_base_ = './faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
421
27.133333
76
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_head=dict(
            reg_decoded_bbox=True,
            loss_bbox=dict(type='IoULoss', loss_weight=10.0))))
200
27.714286
63
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py
_base_ = [ '../_base_/models/faster_rcnn_r50_caffe_c4.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
1,388
33.725
72
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py
_base_ = './faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
421
27.133333
76
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py
_base_ = [
    '../common/mstrain_3x_coco.py',
    '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
468
26.588235
77
py
DSLA-DSLA
DSLA-DSLA/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py
_base_ = [ '../_base_/models/faster_rcnn_r50_caffe_dc5.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,448
32.697674
72
py
DSLA-DSLA
DSLA-DSLA/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py
_base_ = 'mask_rcnn_r50_fpn_crop640_50e_coco.py' norm_cfg = dict(type='BN', requires_grad=True) model = dict( neck=dict( type='FPG', in_channels=[256, 512, 1024, 2048], out_channels=256, inter_channels=256, num_outs=5, stack_times=9, paths=['bu'] * 9, same_down_trans=None, same_up_trans=dict( type='conv', kernel_size=3, stride=2, padding=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), across_lateral_trans=dict( type='conv', kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), across_down_trans=dict( type='interpolation_conv', mode='nearest', kernel_size=3, norm_cfg=norm_cfg, order=('act', 'conv', 'norm'), inplace=False), across_up_trans=None, across_skip_trans=dict( type='conv', kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), output_trans=dict( type='last_conv', kernel_size=3, order=('act', 'conv', 'norm'), inplace=False), norm_cfg=norm_cfg, skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))
1,450
28.612245
64
py
DSLA-DSLA
DSLA-DSLA/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py
_base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' norm_cfg = dict(type='BN', requires_grad=True) model = dict( neck=dict( _delete_=True, type='FPG', in_channels=[256, 512, 1024, 2048], out_channels=256, inter_channels=256, num_outs=5, add_extra_convs=True, start_level=1, stack_times=9, paths=['bu'] * 9, same_down_trans=None, same_up_trans=dict( type='conv', kernel_size=3, stride=2, padding=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), across_lateral_trans=dict( type='conv', kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), across_down_trans=dict( type='interpolation_conv', mode='nearest', kernel_size=3, norm_cfg=norm_cfg, order=('act', 'conv', 'norm'), inplace=False), across_up_trans=None, across_skip_trans=dict( type='conv', kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), output_trans=dict( type='last_conv', kernel_size=3, order=('act', 'conv', 'norm'), inplace=False), norm_cfg=norm_cfg, skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) evaluation = dict(interval=2)
1,571
28.111111
64
py
DSLA-DSLA
DSLA-DSLA/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] norm_cfg = dict(type='BN', requires_grad=True) model = dict( backbone=dict(norm_cfg=norm_cfg, norm_eval=False), neck=dict(norm_cfg=norm_cfg), roi_head=dict(bbox_head=dict(norm_cfg=norm_cfg))) dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=(640, 640), ratio_range=(0.8, 1.2), keep_ratio=True), dict(type='RandomCrop', crop_size=(640, 640)), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=(640, 640)), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(640, 640), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=64), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=4, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # learning policy optimizer = dict( type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001, paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.1, step=[30, 40]) # runtime settings runner = dict(max_epochs=50) evaluation = dict(interval=2)
2,140
30.028986
77
py
DSLA-DSLA
DSLA-DSLA/configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] norm_cfg = dict(type='BN', requires_grad=True) model = dict( backbone=dict(norm_cfg=norm_cfg, norm_eval=False), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, norm_cfg=norm_cfg, num_outs=5), roi_head=dict( bbox_head=dict(norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg))) dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=(640, 640), ratio_range=(0.8, 1.2), keep_ratio=True), dict(type='RandomCrop', crop_size=(640, 640)), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=(640, 640)), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(640, 640), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=64), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=4, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # learning policy optimizer = dict( type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001, paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=0.1, step=[30, 40]) # runtime settings runner = dict(max_epochs=50) evaluation = dict(interval=2)
2,312
29.84
78
py
DSLA-DSLA
DSLA-DSLA/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py
_base_ = 'mask_rcnn_r50_fpg_crop640_50e_coco.py'
model = dict(
    neck=dict(out_channels=128, inter_channels=128),
    rpn_head=dict(in_channels=128),
    roi_head=dict(
        bbox_roi_extractor=dict(out_channels=128),
        bbox_head=dict(in_channels=128),
        mask_roi_extractor=dict(out_channels=128),
        mask_head=dict(in_channels=128)))
357
31.545455
52
py
DSLA-DSLA
DSLA-DSLA/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py
_base_ = 'retinanet_r50_fpg_crop640_50e_coco.py'
model = dict(
    neck=dict(out_channels=128, inter_channels=128),
    bbox_head=dict(in_channels=128))
154
24.833333
52
py
DSLA-DSLA
DSLA-DSLA/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py
_base_ = 'faster_rcnn_r50_fpg_crop640_50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    neck=dict(out_channels=128, inter_channels=128),
    rpn_head=dict(in_channels=128),
    roi_head=dict(
        bbox_roi_extractor=dict(out_channels=128),
        bbox_head=dict(in_channels=128)))
314
30.5
52
py
DSLA-DSLA
DSLA-DSLA/configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py
_base_ = 'faster_rcnn_r50_fpn_crop640_50e_coco.py' norm_cfg = dict(type='BN', requires_grad=True) model = dict( neck=dict( type='FPG', in_channels=[256, 512, 1024, 2048], out_channels=256, inter_channels=256, num_outs=5, stack_times=9, paths=['bu'] * 9, same_down_trans=None, same_up_trans=dict( type='conv', kernel_size=3, stride=2, padding=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), across_lateral_trans=dict( type='conv', kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), across_down_trans=dict( type='interpolation_conv', mode='nearest', kernel_size=3, norm_cfg=norm_cfg, order=('act', 'conv', 'norm'), inplace=False), across_up_trans=None, across_skip_trans=dict( type='conv', kernel_size=1, norm_cfg=norm_cfg, inplace=False, order=('act', 'conv', 'norm')), output_trans=dict( type='last_conv', kernel_size=3, order=('act', 'conv', 'norm'), inplace=False), norm_cfg=norm_cfg, skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))
1,452
28.653061
64
py
DSLA-DSLA
DSLA-DSLA/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py
_base_ = [ '../_base_/models/cascade_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( roi_head=dict(bbox_head=[ dict( type='SABLHead', num_classes=80, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)), dict( type='SABLHead', num_classes=80, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)), dict( type='SABLHead', num_classes=80, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)) ]))
3,155
35.275862
79
py
DSLA-DSLA
DSLA-DSLA/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), bbox_head=dict( _delete_=True, type='SABLRetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), norm_cfg=norm_cfg, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), # training and testing settings train_cfg=dict( assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False)) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 480), (1333, 960)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] data = dict(train=dict(pipeline=train_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
2,474
32.445946
77
py
DSLA-DSLA
DSLA-DSLA/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( bbox_head=dict( _delete_=True, type='SABLRetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), # training and testing settings train_cfg=dict( assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
1,619
30.764706
73
py
DSLA-DSLA
DSLA-DSLA/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), bbox_head=dict( _delete_=True, type='SABLRetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), norm_cfg=norm_cfg, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), # training and testing settings train_cfg=dict( assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
1,849
31.45614
73
py
DSLA-DSLA
DSLA-DSLA/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py
_base_ = [ '../_base_/models/cascade_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), roi_head=dict(bbox_head=[ dict( type='SABLHead', num_classes=80, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)), dict( type='SABLHead', num_classes=80, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)), dict( type='SABLHead', num_classes=80, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)) ]))
3,296
35.230769
79
py
DSLA-DSLA
DSLA-DSLA/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict( _delete_=True, type='SABLHead', num_classes=80, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0))))
1,228
34.114286
77
py
DSLA-DSLA
DSLA-DSLA/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( bbox_head=dict( _delete_=True, type='SABLRetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), norm_cfg=norm_cfg, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), # training and testing settings train_cfg=dict( assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
1,708
31.245283
73
py
DSLA-DSLA
DSLA-DSLA/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # model settings norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), bbox_head=dict( _delete_=True, type='SABLRetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), norm_cfg=norm_cfg, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), # training and testing settings train_cfg=dict( assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False)) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] data = dict(train=dict(pipeline=train_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
2,474
32.445946
77
py
DSLA-DSLA
DSLA-DSLA/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), roi_head=dict( bbox_head=dict( _delete_=True, type='SABLHead', num_classes=80, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0))))
1,369
34.128205
77
py
DSLA-DSLA
DSLA-DSLA/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), bbox_head=dict( _delete_=True, type='SABLRetinaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), # training and testing settings train_cfg=dict( assigner=dict( type='ApproxMaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.0, ignore_iof_thr=-1), allowed_border=-1, pos_weight=-1, debug=False)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
1,760
31.018182
73
py
DSLA-DSLA
DSLA-DSLA/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    neck=dict(
        type='PAFPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5))
200
21.333333
56
py
DSLA-DSLA
DSLA-DSLA/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/voc0712.py',
    '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[3])
# runtime settings
runner = dict(
    type='EpochBasedRunner', max_epochs=4)  # actual epoch = 4 * 3 = 12
506
32.8
79
py
DSLA-DSLA
DSLA-DSLA/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py
_base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') # dataset settings dataset_type = 'CocoDataset' data_root = 'data/VOCdevkit/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1000, 600), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1000, 600), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='RepeatDataset', times=3, dataset=dict( type=dataset_type, ann_file='data/voc0712_trainval.json', img_prefix='data/VOCdevkit', pipeline=train_pipeline, classes=CLASSES)), val=dict( type=dataset_type, ann_file='data/voc07_test.json', img_prefix='data/VOCdevkit', pipeline=test_pipeline, classes=CLASSES), test=dict( type=dataset_type, ann_file='data/voc07_test.json', img_prefix='data/VOCdevkit', pipeline=test_pipeline, classes=CLASSES)) evaluation = dict(interval=1, metric='bbox') # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy # actual epoch = 3 * 3 = 9 lr_config = dict(policy='step', step=[3]) # runtime settings runner = dict( type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12
2,524
32.223684
79
py
DSLA-DSLA
DSLA-DSLA/configs/pascal_voc/ssd512_voc0712.py
_base_ = 'ssd300_voc0712.py' input_size = 512 model = dict( neck=dict( out_channels=(512, 1024, 512, 256, 256, 256, 256), level_strides=(2, 2, 2, 2, 1), level_paddings=(1, 1, 1, 1, 1), last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 256, 256, 256), anchor_generator=dict( input_size=input_size, strides=[8, 16, 32, 64, 128, 256, 512], basesize_ratio_range=(0.15, 0.9), ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2])))) img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(512, 512), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(512, 512), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,954
32.706897
79
py
DSLA-DSLA
DSLA-DSLA/configs/pascal_voc/ssd300_voc0712.py
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py', '../_base_/default_runtime.py' ] model = dict( bbox_head=dict( num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2, 0.9)))) # dataset settings dataset_type = 'VOCDataset' data_root = 'data/VOCdevkit/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(300, 300), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(300, 300), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=3, train=dict( type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict() # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[16, 20]) checkpoint_config = dict(interval=1) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=24)
2,222
30.757143
79
py
DSLA-DSLA
DSLA-DSLA/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/voc0712.py',
    '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=20))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[3])
# runtime settings
runner = dict(
    type='EpochBasedRunner', max_epochs=4)  # actual epoch = 4 * 3 = 12
489
31.666667
77
py
DSLA-DSLA
DSLA-DSLA/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
_base_ = './queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
233
28.25
76
py
DSLA-DSLA
DSLA-DSLA/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
214
25.875
61
py
DSLA-DSLA
DSLA-DSLA/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR. train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) ] data = dict(train=dict(pipeline=train_pipeline))
2,254
40
78
py
DSLA-DSLA
DSLA-DSLA/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py
_base_ = './queryinst_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, value) for value in min_values],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
data = dict(train=dict(pipeline=train_pipeline))
lr_config = dict(policy='step', step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
879
35.666667
77
py
DSLA-DSLA
DSLA-DSLA/configs/queryinst/queryinst_r50_fpn_1x_coco.py
_base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] num_stages = 6 num_proposals = 100 model = dict( type='QueryInst', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=0, add_extra_convs='on_input', num_outs=4), rpn_head=dict( type='EmbeddingRPNHead', num_proposals=num_proposals, proposal_feature_channel=256), roi_head=dict( type='SparseRoIHead', num_stages=num_stages, stage_loss_weights=[1] * num_stages, proposal_feature_channel=256, bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='DIIHead', num_classes=80, num_ffn_fcs=2, num_heads=8, num_cls_fcs=1, num_reg_fcs=3, feedforward_channels=2048, in_channels=256, dropout=0.0, ffn_act_cfg=dict(type='ReLU', inplace=True), dynamic_conv_cfg=dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=7, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0), bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=False, target_means=[0., 0., 0., 0.], target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) ], mask_head=[ dict( type='DynamicMaskHead', dynamic_conv_cfg=dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=14, with_proj=False, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), num_convs=4, num_classes=80, roi_feat_size=14, in_channels=256, conv_kernel_size=3, conv_out_channels=256, class_agnostic=False, norm_cfg=dict(type='BN'), upsample_cfg=dict(type='deconv', scale_factor=2), loss_mask=dict( type='DiceLoss', loss_weight=8.0, use_sigmoid=True, activate=False, eps=1e-5)) for _ in range(num_stages) ]), # training and testing settings train_cfg=dict( rpn=None, rcnn=[ dict( assigner=dict( type='HungarianAssigner', cls_cost=dict(type='FocalLossCost', weight=2.0), reg_cost=dict(type='BBoxL1Cost', weight=5.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0)), sampler=dict(type='PseudoSampler'), pos_weight=1, mask_size=28, ) for _ in range(num_stages) ]), test_cfg=dict( rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) # optimizer optimizer = dict( _delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001, paramwise_cfg=dict( custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) # learning policy lr_config = dict(policy='step', step=[8, 11], warmup_iters=1000) runner = dict(type='EpochBasedRunner', max_epochs=12)
4,956
34.661871
79
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]
174
28.166667
72
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py
_base_ = './mask_rcnn_r101_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
420
27.066667
76
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
485
24.578947
76
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
197
27.285714
61
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py
_base_ = './mask_rcnn_r101_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=8, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline))
2,132
31.318182
77
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnet50_caffe'))) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,606
31.14
77
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py
_base_ = './mask_rcnn_r101_fpn_1x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=8, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,838
29.147541
77
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py
_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
165
32.2
60
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py
_base_ = './mask_rcnn_x101_32x4d_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
426
27.466667
76
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py
_base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
222
26.875
67
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_caffe_c4.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
1413
34.35
77
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1556
32.847826
77
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    rpn_head=dict(
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        bbox_roi_extractor=dict(
            roi_layer=dict(
                type='RoIAlign',
                output_size=7,
                sampling_ratio=2,
                aligned=False)),
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        mask_roi_extractor=dict(
            roi_layer=dict(
                type='RoIAlign',
                output_size=14,
                sampling_ratio=2,
                aligned=False))))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
2066
32.33871
78
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=8,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675],
    std=[57.375, 57.120, 58.395],
    to_rgb=False)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Use RepeatDataset to speed up training
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/instances_train2017.json',
            img_prefix=data_root + 'train2017/',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
2537
28.511628
77
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py
_base_ = './mask_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
197
27.285714
61
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_fpn_poly_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
805
32.583333
77
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
420
27.066667
76
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
485
24.578947
76
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
# fp16 settings
fp16 = dict(loss_scale=512.)
87
21
41
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
263
23
61
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py
_base_ = './mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
426
27.466667
76
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
174
28.166667
72
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        depth=101,
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1660
28.660714
77
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py
_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
165
32.2
60
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
107
20.6
49
py
DSLA-DSLA
DSLA-DSLA/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1412
33.463415
77
py
DSLA-DSLA
DSLA-DSLA/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py
_base_ = '../retinanet/retinanet_x101_32x4d_fpn_1x_coco.py'
model = dict(
    bbox_head=dict(
        type='PISARetinaHead',
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
272
33.125
73
py
DSLA-DSLA
DSLA-DSLA/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        type='PISARoIHead',
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            sampler=dict(
                type='ScoreHLRSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True,
                k=0.5,
                bias=0.),
            isr=dict(k=2, bias=0),
            carl=dict(k=1, bias=0.2))),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
929
29
77
py
DSLA-DSLA
DSLA-DSLA/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
    bbox_head=dict(
        type='PISARetinaHead',
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
265
32.25
73
py
DSLA-DSLA
DSLA-DSLA/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        type='PISARoIHead',
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            sampler=dict(
                type='ScoreHLRSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True,
                k=0.5,
                bias=0.),
            isr=dict(k=2, bias=0),
            carl=dict(k=1, bias=0.2))),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
926
28.903226
77
py
DSLA-DSLA
DSLA-DSLA/configs/pisa/pisa_ssd512_coco.py
_base_ = '../ssd/ssd512_coco.py'
model = dict(
    bbox_head=dict(type='PISASSDHead'),
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
247
26.555556
71
py
DSLA-DSLA
DSLA-DSLA/configs/pisa/pisa_ssd300_coco.py
_base_ = '../ssd/ssd300_coco.py'
model = dict(
    bbox_head=dict(type='PISASSDHead'),
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
247
26.555556
71
py
DSLA-DSLA
DSLA-DSLA/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        type='PISARoIHead',
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            sampler=dict(
                type='ScoreHLRSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True,
                k=0.5,
                bias=0.),
            isr=dict(k=2, bias=0),
            carl=dict(k=1, bias=0.2))),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
933
29.129032
77
py
DSLA-DSLA
DSLA-DSLA/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        type='PISARoIHead',
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            sampler=dict(
                type='ScoreHLRSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True,
                k=0.5,
                bias=0.),
            isr=dict(k=2, bias=0),
            carl=dict(k=1, bias=0.2))),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
922
28.774194
77
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
428
27.6
76
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
435
28.066667
76
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py'
]
183
29.666667
73
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1389
31.325581
72
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1426
32.97619
77
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
435
28.066667
76
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
423
27.266667
76
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
200
27.714286
61
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=8,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675],
    std=[57.375, 57.120, 58.395],
    to_rgb=False)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1878
29.803279
77
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
213
29.571429
61
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
182
29.5
72
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
206
28.571429
61
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py
_base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
230
27.875
67
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
446
26.9375
76
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
427
27.533333
76
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
447
27
76
py
DSLA-DSLA
DSLA-DSLA/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
149
29
53
py