Dataset schema (one row per source file):
  repo             string   (length 2 to 99; every row shown here is DSLA-DSLA)
  file             string   (length 13 to 225)
  code             string   (length 0 to 18.3M)
  file_length      int64    (0 to 18.3M)
  avg_line_length  float64  (0 to 1.36M)
  max_line_length  int64    (0 to 4.26M)
  extension_type   string   (1 distinct value: py)

Each entry below gives the file path (its first component is the repo name), the contents of the code column, and a summary line with file_length, avg_line_length, max_line_length and extension_type.
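For orientation, here is a minimal sketch of how a dump with this schema could be inspected, assuming it has been exported locally as a JSON Lines file; the filename and the specific queries are illustrative assumptions, not part of the dataset.

# Minimal sketch: inspect a local JSON Lines export of the table above with pandas.
# The filename "dsla_code_dump.jsonl" is hypothetical; point it at the actual export.
import pandas as pd

df = pd.read_json("dsla_code_dump.jsonl", lines=True)

# Sanity-check the schema described above.
print(df.dtypes)
print(df["extension_type"].unique())  # expected: ['py']

# Largest files by character count.
print(df.nlargest(5, "file_length")[["file", "file_length", "max_line_length"]])

# Count files per config family (the directory under configs/).
family = df["file"].str.extract(r"configs/([^/]+)/")[0]
print(family.value_counts())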
DSLA-DSLA/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 158, avg_line_length 30.8, max_line_length 53, extension_type py)

DSLA-DSLA/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py
_base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py' # model settings model = dict( backbone=dict( extra=dict( stage2=dict(num_channels=(18, 36)), stage3=dict(num_channels=(18, 36, 72)), stage4=dict(num_channels=(18, 36, 72, 144))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
(file_length 457, avg_line_length 37.166667, max_line_length 77, extension_type py)

DSLA-DSLA/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py
_base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 151, avg_line_length 29.4, max_line_length 53, extension_type py)

DSLA-DSLA/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' model = dict( backbone=dict( _delete_=True, type='HRNet', extra=dict( stage1=dict( num_modules=1, num_branches=1, block='BOTTLENECK', num_blocks=(4, ), num_channels=(64, )), stage2=dict( num_modules=1, num_branches=2, block='BASIC', num_blocks=(4, 4), num_channels=(32, 64)), stage3=dict( num_modules=4, num_branches=3, block='BASIC', num_blocks=(4, 4, 4), num_channels=(32, 64, 128)), stage4=dict( num_modules=3, num_branches=4, block='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), neck=dict( _delete_=True, type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256, stride=2, num_outs=5)) img_norm_cfg = dict( mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
(file_length 2,333, avg_line_length 31.873239, max_line_length 78, extension_type py)

DSLA-DSLA/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' model = dict( backbone=dict( type='HRNet', extra=dict( stage2=dict(num_channels=(40, 80)), stage3=dict(num_channels=(40, 80, 160)), stage4=dict(num_channels=(40, 80, 160, 320))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
(file_length 463, avg_line_length 37.666667, max_line_length 78, extension_type py)

DSLA-DSLA/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( _delete_=True, type='HRNet', extra=dict( stage1=dict( num_modules=1, num_branches=1, block='BOTTLENECK', num_blocks=(4, ), num_channels=(64, )), stage2=dict( num_modules=1, num_branches=2, block='BASIC', num_blocks=(4, 4), num_channels=(32, 64)), stage3=dict( num_modules=4, num_branches=3, block='BASIC', num_blocks=(4, 4, 4), num_channels=(32, 64, 128)), stage4=dict( num_modules=3, num_branches=4, block='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), neck=dict( _delete_=True, type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256)) # learning policy lr_config = dict(step=[16, 19]) runner = dict(type='EpochBasedRunner', max_epochs=20)
(file_length 1,291, avg_line_length 30.512195, max_line_length 76, extension_type py)

DSLA-DSLA/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py
_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 151, avg_line_length 29.4, max_line_length 53, extension_type py)

DSLA-DSLA/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py
_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py' model = dict( backbone=dict( extra=dict( stage2=dict(num_channels=(18, 36)), stage3=dict(num_channels=(18, 36, 72)), stage4=dict(num_channels=(18, 36, 72, 144))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
(file_length 459, avg_line_length 40.818182, max_line_length 77, extension_type py)

DSLA-DSLA/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py
_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py' model = dict( backbone=dict( type='HRNet', extra=dict( stage2=dict(num_channels=(40, 80)), stage3=dict(num_channels=(40, 80, 160)), stage4=dict(num_channels=(40, 80, 160, 320))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
(file_length 484, avg_line_length 39.416667, max_line_length 78, extension_type py)

DSLA-DSLA/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py
_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py' # learning policy lr_config = dict(step=[24, 27]) runner = dict(type='EpochBasedRunner', max_epochs=28)
(file_length 158, avg_line_length 30.8, max_line_length 53, extension_type py)

DSLA-DSLA/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 153, avg_line_length 29.8, max_line_length 53, extension_type py)

DSLA-DSLA/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py
_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' # model settings model = dict( backbone=dict( extra=dict( stage2=dict(num_channels=(18, 36)), stage3=dict(num_channels=(18, 36, 72)), stage4=dict(num_channels=(18, 36, 72, 144))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
(file_length 462, avg_line_length 37.583333, max_line_length 77, extension_type py)

DSLA-DSLA/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' model = dict( backbone=dict( extra=dict( stage2=dict(num_channels=(18, 36)), stage3=dict(num_channels=(18, 36, 72)), stage4=dict(num_channels=(18, 36, 72, 144))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
(file_length 443, avg_line_length 39.363636, max_line_length 77, extension_type py)

DSLA-DSLA/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py
_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 151, avg_line_length 29.4, max_line_length 53, extension_type py)

DSLA-DSLA/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' # model settings model = dict( backbone=dict( extra=dict( stage2=dict(num_channels=(18, 36)), stage3=dict(num_channels=(18, 36, 72)), stage4=dict(num_channels=(18, 36, 72, 144))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
(file_length 455, avg_line_length 37, max_line_length 77, extension_type py)

DSLA-DSLA/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( _delete_=True, type='HRNet', extra=dict( stage1=dict( num_modules=1, num_branches=1, block='BOTTLENECK', num_blocks=(4, ), num_channels=(64, )), stage2=dict( num_modules=1, num_branches=2, block='BASIC', num_blocks=(4, 4), num_channels=(32, 64)), stage3=dict( num_modules=4, num_branches=3, block='BASIC', num_blocks=(4, 4, 4), num_channels=(32, 64, 128)), stage4=dict( num_modules=3, num_branches=4, block='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), neck=dict( _delete_=True, type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256))
(file_length 1,185, avg_line_length 30.210526, max_line_length 76, extension_type py)

DSLA-DSLA/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' img_norm_cfg = dict( mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 1,337, avg_line_length 32.45, max_line_length 78, extension_type py)

DSLA-DSLA/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py
_base_ = './fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 158, avg_line_length 30.8, max_line_length 53, extension_type py)

DSLA-DSLA/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py
_base_ = './htc_hrnetv2p_w32_20e_coco.py' model = dict( backbone=dict( extra=dict( stage2=dict(num_channels=(18, 36)), stage3=dict(num_channels=(18, 36, 72)), stage4=dict(num_channels=(18, 36, 72, 144))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
(file_length 431, avg_line_length 38.272727, max_line_length 77, extension_type py)

DSLA-DSLA/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py
_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' # model settings model = dict( backbone=dict( type='HRNet', extra=dict( stage2=dict(num_channels=(40, 80)), stage3=dict(num_channels=(40, 80, 160)), stage4=dict(num_channels=(40, 80, 160, 320))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
(file_length 487, avg_line_length 36.538462, max_line_length 78, extension_type py)

DSLA-DSLA/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py
_base_ = './htc_hrnetv2p_w40_20e_coco.py' # learning policy lr_config = dict(step=[24, 27]) runner = dict(type='EpochBasedRunner', max_epochs=28)
(file_length 146, avg_line_length 28.4, max_line_length 53, extension_type py)

DSLA-DSLA/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py
_base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 153, avg_line_length 29.8, max_line_length 53, extension_type py)

DSLA-DSLA/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py
_base_ = '../htc/htc_r50_fpn_20e_coco.py' model = dict( backbone=dict( _delete_=True, type='HRNet', extra=dict( stage1=dict( num_modules=1, num_branches=1, block='BOTTLENECK', num_blocks=(4, ), num_channels=(64, )), stage2=dict( num_modules=1, num_branches=2, block='BASIC', num_blocks=(4, 4), num_channels=(32, 64)), stage3=dict( num_modules=4, num_branches=3, block='BASIC', num_blocks=(4, 4, 4), num_channels=(32, 64, 128)), stage4=dict( num_modules=3, num_branches=4, block='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), neck=dict( _delete_=True, type='HRFPN', in_channels=[32, 64, 128, 256], out_channels=256))
(file_length 1,170, avg_line_length 29.815789, max_line_length 76, extension_type py)

DSLA-DSLA/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py
_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' model = dict( backbone=dict( extra=dict( stage2=dict(num_channels=(18, 36)), stage3=dict(num_channels=(18, 36, 72)), stage4=dict(num_channels=(18, 36, 72, 144))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
(file_length 436, avg_line_length 38.727273, max_line_length 77, extension_type py)

DSLA-DSLA/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py
_base_ = './htc_hrnetv2p_w32_20e_coco.py' model = dict( backbone=dict( type='HRNet', extra=dict( stage2=dict(num_channels=(40, 80)), stage3=dict(num_channels=(40, 80, 160)), stage4=dict(num_channels=(40, 80, 160, 320))), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
(file_length 456, avg_line_length 37.083333, max_line_length 78, extension_type py)

DSLA-DSLA/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict(plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='1111', kv_stride=2), stages=(False, False, True, True), position='after_conv2') ]))
(file_length 403, avg_line_length 27.857143, max_line_length 56, extension_type py)

DSLA-DSLA/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='1111', kv_stride=2), stages=(False, False, True, True), position='after_conv2') ], dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)))
(file_length 575, avg_line_length 32.882353, max_line_length 72, extension_type py)

DSLA-DSLA/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict( plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False, True, True), position='after_conv2') ], dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)))
(file_length 575, avg_line_length 32.882353, max_line_length 72, extension_type py)

DSLA-DSLA/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( backbone=dict(plugins=[ dict( cfg=dict( type='GeneralizedAttention', spatial_range=-1, num_heads=8, attention_type='0010', kv_stride=2), stages=(False, False, True, True), position='after_conv2') ]))
(file_length 403, avg_line_length 27.857143, max_line_length 56, extension_type py)

DSLA-DSLA/configs/yolox/yolox_m_8x8_300e_coco.py
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( backbone=dict(deepen_factor=0.67, widen_factor=0.75), neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2), bbox_head=dict(in_channels=192, feat_channels=192), )
(file_length 266, avg_line_length 28.666667, max_line_length 79, extension_type py)

DSLA-DSLA/configs/yolox/yolox_s_8x8_300e_coco.py
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] img_scale = (640, 640) # model settings model = dict( type='YOLOX', input_size=img_scale, random_size_range=(15, 25), random_size_interval=10, backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5), neck=dict( type='YOLOXPAFPN', in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict( type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128), train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)), # In order to align the source code, the threshold of the val phase is # 0.01, and the threshold of the test phase is 0.001. test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))) # dataset settings data_root = 'data/coco/' dataset_type = 'CocoDataset' train_pipeline = [ dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), dict( type='RandomAffine', scaling_ratio_range=(0.1, 2), border=(-img_scale[0] // 2, -img_scale[1] // 2)), dict( type='MixUp', img_scale=img_scale, ratio_range=(0.8, 1.6), pad_val=114.0), dict(type='YOLOXHSVRandomAug'), dict(type='RandomFlip', flip_ratio=0.5), # According to the official implementation, multi-scale # training is not considered here but in the # 'mmdet/models/detectors/yolox.py'. dict(type='Resize', img_scale=img_scale, keep_ratio=True), dict( type='Pad', pad_to_square=True, # If the image is three-channel, the pad value needs # to be set separately for each channel. pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] train_dataset = dict( type='MultiImageMixDataset', dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=[ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True) ], filter_empty_gt=False, ), pipeline=train_pipeline) test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=img_scale, flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict( type='Pad', pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=4, persistent_workers=True, train=train_dataset, val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer # default 8 gpu optimizer = dict( type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True, paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.)) optimizer_config = dict(grad_clip=None) max_epochs = 300 num_last_epochs = 15 resume_from = None interval = 10 # learning policy lr_config = dict( _delete_=True, policy='YOLOX', warmup='exp', by_epoch=False, warmup_by_epoch=True, warmup_ratio=1, warmup_iters=5, # 5 epoch num_last_epochs=num_last_epochs, min_lr_ratio=0.05) runner = dict(type='EpochBasedRunner', max_epochs=max_epochs) custom_hooks = [ dict( type='YOLOXModeSwitchHook', num_last_epochs=num_last_epochs, priority=48), dict( type='SyncNormHook', num_last_epochs=num_last_epochs, interval=interval, priority=48), dict( type='ExpMomentumEMAHook', 
resume_from=resume_from, momentum=0.0001, priority=49) ] checkpoint_config = dict(interval=interval) evaluation = dict( save_best='auto', # The evaluation interval is 'interval' when running epoch is # less than ‘max_epochs - num_last_epochs’. # The evaluation interval is 1 when running epoch is greater than # or equal to ‘max_epochs - num_last_epochs’. interval=interval, dynamic_intervals=[(max_epochs - num_last_epochs, 1)], metric='bbox') log_config = dict(interval=50)
(file_length 4,793, avg_line_length 28.776398, max_line_length 79, extension_type py)

DSLA-DSLA/configs/yolox/yolox_l_8x8_300e_coco.py
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( backbone=dict(deepen_factor=1.0, widen_factor=1.0), neck=dict( in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3), bbox_head=dict(in_channels=256, feat_channels=256))
(file_length 272, avg_line_length 29.333333, max_line_length 74, extension_type py)

DSLA-DSLA/configs/yolox/yolox_x_8x8_300e_coco.py
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( backbone=dict(deepen_factor=1.33, widen_factor=1.25), neck=dict( in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4), bbox_head=dict(in_channels=320, feat_channels=320))
(file_length 274, avg_line_length 29.555556, max_line_length 74, extension_type py)

DSLA-DSLA/configs/yolox/yolox_nano_8x8_300e_coco.py
_base_ = './yolox_tiny_8x8_300e_coco.py' # model settings model = dict( backbone=dict(deepen_factor=0.33, widen_factor=0.25, use_depthwise=True), neck=dict( in_channels=[64, 128, 256], out_channels=64, num_csp_blocks=1, use_depthwise=True), bbox_head=dict(in_channels=64, feat_channels=64, use_depthwise=True))
(file_length 356, avg_line_length 28.75, max_line_length 77, extension_type py)

DSLA-DSLA/configs/yolox/yolox_tiny_8x8_300e_coco.py
_base_ = './yolox_s_8x8_300e_coco.py' # model settings model = dict( random_size_range=(10, 20), backbone=dict(deepen_factor=0.33, widen_factor=0.375), neck=dict(in_channels=[96, 192, 384], out_channels=96), bbox_head=dict(in_channels=96, feat_channels=96)) img_scale = (640, 640) train_pipeline = [ dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), dict( type='RandomAffine', scaling_ratio_range=(0.5, 1.5), border=(-img_scale[0] // 2, -img_scale[1] // 2)), dict(type='YOLOXHSVRandomAug'), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Resize', img_scale=img_scale, keep_ratio=True), dict( type='Pad', pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(416, 416), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict( type='Pad', pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) ] train_dataset = dict(pipeline=train_pipeline) data = dict( train=train_dataset, val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
(file_length 1,617, avg_line_length 28.962963, max_line_length 76, extension_type py)

DSLA-DSLA/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( backbone=dict( _delete_=True, type='SwinTransformer', embed_dims=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2, patch_norm=True, out_indices=(1, 2, 3), # Please only add indices that would be used # in FPN, otherwise some parameter will not be used with_cp=False, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5)) optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
(file_length 1,043, avg_line_length 33.8, max_line_length 123, extension_type py)

DSLA-DSLA/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type='MaskRCNN', backbone=dict( _delete_=True, type='SwinTransformer', embed_dims=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2, patch_norm=True, out_indices=(0, 1, 2, 3), with_cp=False, convert_weights=True, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels=[96, 192, 384, 768])) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR / Sparse RCNN train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline)) optimizer = dict( _delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg=dict( custom_keys={ 'absolute_pos_embed': dict(decay_mult=0.), 'relative_position_bias_table': dict(decay_mult=0.), 'norm': dict(decay_mult=0.) })) lr_config = dict(warmup_iters=1000, step=[27, 33]) runner = dict(max_epochs=36)
(file_length 3,305, avg_line_length 34.934783, max_line_length 123, extension_type py)

DSLA-DSLA/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py' pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa model = dict( backbone=dict( depths=[2, 2, 18, 2], init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
(file_length 318, avg_line_length 44.571429, max_line_length 124, extension_type py)

DSLA-DSLA/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type='MaskRCNN', backbone=dict( _delete_=True, type='SwinTransformer', embed_dims=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2, patch_norm=True, out_indices=(0, 1, 2, 3), with_cp=False, convert_weights=True, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels=[96, 192, 384, 768])) optimizer = dict( _delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg=dict( custom_keys={ 'absolute_pos_embed': dict(decay_mult=0.), 'relative_position_bias_table': dict(decay_mult=0.), 'norm': dict(decay_mult=0.) })) lr_config = dict(warmup_iters=1000, step=[8, 11]) runner = dict(max_epochs=12)
(file_length 1,301, avg_line_length 29.27907, max_line_length 123, extension_type py)

DSLA-DSLA/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py' # you need to set mode='dynamic' if you are using pytorch<=1.5.0 fp16 = dict(loss_scale=dict(init_scale=512))
(file_length 169, avg_line_length 41.5, max_line_length 64, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_r2_101_fpn_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( type='Res2Net', depth=101, scales=4, base_width=26, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
(file_length 464, avg_line_length 26.352941, max_line_length 62, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNet', depth=101, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
(file_length 546, avg_line_length 33.1875, max_line_length 74, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_r50_fpn_1x_coco.py
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='VFNet', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_output', # use P5 num_outs=5, relu_before_extra_convs=True), bbox_head=dict( type='VFNetHead', num_classes=80, in_channels=256, stacked_convs=3, feat_channels=256, strides=[8, 16, 32, 64, 128], center_sampling=False, dcn_on_last_conv=False, use_atss=True, use_vfl=True, loss_cls=dict( type='VarifocalLoss', use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.5), loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)), # training and testing settings train_cfg=dict( assigner=dict(type='ATSSAssigner', topk=9), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # data setting dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict( lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.1, step=[8, 11]) runner = dict(type='EpochBasedRunner', max_epochs=12)
(file_length 3,240, avg_line_length 29.009259, max_line_length 79, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' model = dict( backbone=dict( type='Res2Net', depth=101, scales=4, base_width=26, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
(file_length 602, avg_line_length 30.736842, max_line_length 74, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
(file_length 201, avg_line_length 27.857143, max_line_length 61, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
(file_length 585, avg_line_length 31.555556, max_line_length 76, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 480), (1333, 960)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 1,312, avg_line_length 31.825, max_line_length 77, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)), bbox_head=dict(dcn_on_last_conv=True))
(file_length 248, avg_line_length 34.571429, max_line_length 74, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_r101_fpn_1x_coco.py
_base_ = './vfnet_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
(file_length 193, avg_line_length 26.714286, max_line_length 61, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_r101_fpn_2x_coco.py
_base_ = './vfnet_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101'))) lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 279, avg_line_length 30.111111, max_line_length 61, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=32, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
(file_length 447, avg_line_length 27, max_line_length 76, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
(file_length 585, avg_line_length 31.555556, max_line_length 76, extension_type py)

DSLA-DSLA/configs/vfnet/vfnet_x101_64x4d_fpn_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
(file_length 447, avg_line_length 27, max_line_length 76, extension_type py)

DSLA-DSLA/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='CenterNet', backbone=dict( type='ResNet', depth=18, norm_eval=False, norm_cfg=dict(type='BN'), init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), neck=dict( type='CTResNetNeck', in_channel=512, num_deconv_filters=(256, 128, 64), num_deconv_kernels=(4, 4, 4), use_dcn=True), bbox_head=dict( type='CenterNetHead', num_classes=80, in_channel=64, feat_channel=64, loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0), loss_wh=dict(type='L1Loss', loss_weight=0.1), loss_offset=dict(type='L1Loss', loss_weight=1.0)), train_cfg=None, test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100)) # We fixed the incorrect img_norm_cfg problem in the source code. img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True, color_type='color'), dict(type='LoadAnnotations', with_bbox=True), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict( type='RandomCenterCropPad', crop_size=(512, 512), ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), mean=[0, 0, 0], std=[1, 1, 1], to_rgb=True, test_pad_mode=None), dict(type='Resize', img_scale=(512, 512), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict( type='MultiScaleFlipAug', scale_factor=1.0, flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict( type='RandomCenterCropPad', ratios=None, border=None, mean=[0, 0, 0], std=[1, 1, 1], to_rgb=True, test_mode=True, test_pad_mode=['logical_or', 31], test_pad_add_pix=1), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict( type='Collect', meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg', 'border'), keys=['img']) ]) ] dataset_type = 'CocoDataset' data_root = 'data/coco/' # Use RepeatDataset to speed up training data = dict( samples_per_gpu=16, workers_per_gpu=4, train=dict( _delete_=True, type='RepeatDataset', times=5, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer # Based on the default settings of modern detectors, the SGD effect is better # than the Adam in the source code, so we use SGD default settings and # if you use adam+lr5e-4, the map is 29.1. optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) # learning policy # Based on the default settings of modern detectors, we added warmup settings. lr_config = dict( policy='step', warmup='linear', warmup_iters=1000, warmup_ratio=1.0 / 1000, step=[18, 24]) # the real step is [18*5, 24*5] runner = dict(max_epochs=28) # the real epoch is 28*5=140
(file_length 4,045, avg_line_length 31.894309, max_line_length 79, extension_type py)

DSLA-DSLA/configs/centernet/centernet_resnet18_140e_coco.py
_base_ = './centernet_resnet18_dcnv2_140e_coco.py' model = dict(neck=dict(use_dcn=False))
(file_length 91, avg_line_length 22, max_line_length 50, extension_type py)

DSLA-DSLA/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py
_base_ = './fovea_r50_fpn_4x4_1x_coco.py' # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 146, avg_line_length 28.4, max_line_length 53, extension_type py)

DSLA-DSLA/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='FOVEA', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, num_outs=5, add_extra_convs='on_input'), bbox_head=dict( type='FoveaHead', num_classes=80, in_channels=256, stacked_convs=4, feat_channels=256, strides=[8, 16, 32, 64, 128], base_edge_list=[16, 32, 64, 128, 256], scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), sigma=0.4, with_deform=False, loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=1.50, alpha=0.4, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), # training and testing settings train_cfg=dict(), test_cfg=dict( nms_pre=1000, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100)) data = dict(samples_per_gpu=4, workers_per_gpu=4) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
(file_length 1,612, avg_line_length 29.433962, max_line_length 79, extension_type py)

DSLA-DSLA/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py
_base_ = './fovea_r50_fpn_4x4_2x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
(file_length 197, avg_line_length 27.285714, max_line_length 61, extension_type py)

DSLA-DSLA/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
_base_ = './fovea_r50_fpn_4x4_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), bbox_head=dict( with_deform=True, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] data = dict(train=dict(pipeline=train_pipeline)) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 1,042, avg_line_length 33.766667, max_line_length 77, extension_type py)

DSLA-DSLA/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
_base_ = './fovea_r50_fpn_4x4_1x_coco.py' model = dict( bbox_head=dict( with_deform=True, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] data = dict(train=dict(pipeline=train_pipeline)) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 901, avg_line_length 33.692308, max_line_length 77, extension_type py)

DSLA-DSLA/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py
_base_ = './fovea_r50_fpn_4x4_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
(file_length 197, avg_line_length 27.285714, max_line_length 61, extension_type py)

DSLA-DSLA/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py
_base_ = './fovea_r50_fpn_4x4_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')), bbox_head=dict( with_deform=True, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 417, avg_line_length 31.153846, max_line_length 69, extension_type py)

DSLA-DSLA/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py
_base_ = './fovea_r50_fpn_4x4_1x_coco.py' model = dict( bbox_head=dict( with_deform=True, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) # learning policy lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
(file_length 362, avg_line_length 32, max_line_length 69, extension_type py)

DSLA-DSLA/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( type='DoubleHeadRoIHead', reg_roi_scale_factor=1.3, bbox_head=dict( _delete_=True, type='DoubleConvFCBBoxHead', num_convs=4, num_fcs=2, in_channels=256, conv_out_channels=1024, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0))))
(file_length 845, avg_line_length 34.25, max_line_length 77, extension_type py)

DSLA-DSLA/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_400mf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), neck=dict( type='FPN', in_channels=[32, 64, 160, 384], out_channels=256, num_outs=5))
(file_length 533, avg_line_length 28.666667, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_8.0gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_8.0gf')), neck=dict( type='FPN', in_channels=[80, 240, 720, 1920], out_channels=256, num_outs=5))
(file_length 521, avg_line_length 28, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_400mf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), neck=dict( type='FPN', in_channels=[32, 64, 160, 384], out_channels=256, num_outs=5))
(file_length 527, avg_line_length 28.333333, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py
_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_1.6gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), neck=dict( type='FPN', in_channels=[72, 168, 408, 912], out_channels=256, num_outs=5))
(file_length 520, avg_line_length 27.944444, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_12gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_12gf')), neck=dict( type='FPN', in_channels=[224, 448, 896, 2240], out_channels=256, num_outs=5))
(file_length 520, avg_line_length 27.944444, max_line_length 72, extension_type py)

DSLA-DSLA/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py
_base_ = './faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py' lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
(file_length 140, avg_line_length 34.25, max_line_length 53, extension_type py)

DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py
_base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_800mf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), neck=dict( type='FPN', in_channels=[64, 128, 288, 672], out_channels=256, num_outs=5)) optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
(file_length 760, avg_line_length 27.185185, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_1.6gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), neck=dict( type='FPN', in_channels=[72, 168, 408, 912], out_channels=256, num_outs=5))
(file_length 528, avg_line_length 28.388889, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py
_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_800mf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), neck=dict( type='FPN', in_channels=[64, 128, 288, 672], out_channels=256, num_outs=5))
(file_length 520, avg_line_length 27.944444, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_4.0gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), neck=dict( type='FPN', in_channels=[80, 240, 560, 1360], out_channels=256, num_outs=5))
(file_length 521, avg_line_length 28, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_800mf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), neck=dict( type='FPN', in_channels=[64, 128, 288, 672], out_channels=256, num_outs=5))
(file_length 528, avg_line_length 28.388889, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
_base_ = [ '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_3.2gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), neck=dict( type='FPN', in_channels=[96, 192, 432, 1008], out_channels=256, num_outs=5)) img_norm_cfg = dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) optimizer = dict(weight_decay=0.00005)
(file_length 1,888, avg_line_length 29.467742, max_line_length 77, extension_type py)

DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py
_base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_400mf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), neck=dict( type='FPN', in_channels=[32, 64, 160, 384], out_channels=256, num_outs=5)) optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
(file_length 759, avg_line_length 27.148148, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py
_base_ = [ '../common/mstrain-poly_3x_coco_instance.py', '../_base_/models/mask_rcnn_r50_fpn.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_4.0gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), neck=dict( type='FPN', in_channels=[80, 240, 560, 1360], out_channels=256, num_outs=5)) optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
(file_length 761, avg_line_length 27.222222, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py
_base_ = 'mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' model = dict( backbone=dict( dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')))
(file_length 305, avg_line_length 37.25, max_line_length 74, extension_type py)

DSLA-DSLA/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_1.6gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), neck=dict( type='FPN', in_channels=[72, 168, 408, 912], out_channels=256, num_outs=5))
(file_length 534, avg_line_length 28.722222, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_4.0gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), neck=dict( type='FPN', in_channels=[80, 240, 560, 1360], out_channels=256, num_outs=5))
(file_length 535, avg_line_length 28.777778, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( backbone=dict( _delete_=True, type='RegNet', arch='regnetx_3.2gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), neck=dict( type='FPN', in_channels=[96, 192, 432, 1008], out_channels=256, num_outs=5)) img_norm_cfg = dict( # The mean and std are used in PyCls when training RegNets mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
(file_length 2,004, avg_line_length 32.416667, max_line_length 73, extension_type py)

DSLA-DSLA/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( type='RegNet', arch='regnetx_4.0gf', out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), neck=dict( type='FPN', in_channels=[80, 240, 560, 1360], out_channels=256, num_outs=5))
529
28.444444
73
py
DSLA-DSLA
DSLA-DSLA/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
_base_ = [
    '../common/mstrain_3x_coco_instance.py',
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    # Images are converted to float32 directly after loading in PyCls
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(weight_decay=0.00005)
2,005
30.34375
77
py
DSLA-DSLA
DSLA-DSLA/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
1,920
32.12069
73
py
DSLA-DSLA
DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    # Images are converted to float32 directly after loading in PyCls
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
2,015
33.169492
77
py
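A small sketch, assuming numpy and mmcv are available, of what the Normalize step in these RegNet pipelines does: the PyCls statistics are given in BGR order, so to_rgb=False keeps the channel order produced by LoadImageFromFile.

import numpy as np
import mmcv

img = np.random.randint(0, 256, (800, 1333, 3)).astype(np.float32)  # BGR image
mean = np.array([103.53, 116.28, 123.675], dtype=np.float32)
std = np.array([57.375, 57.12, 58.395], dtype=np.float32)
normalized = mmcv.imnormalize(img, mean, std, to_rgb=False)
# equivalent to (img - mean) / std with the channel order left unchanged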
DSLA-DSLA
DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
2,261
32.761194
77
py
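The 3x config above samples one of several discrete training scales (multiscale_mode='value'), while the range-style configs in this directory sample the short side between two endpoints. An illustrative sketch of the difference, not mmdet's Resize implementation:

import random

value_scales = [(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)]
range_endpoints = [(1333, 640), (1333, 800)]

scale_value = random.choice(value_scales)                  # multiscale_mode='value'
short_side = random.randint(range_endpoints[0][1], range_endpoints[1][1])
scale_range = (1333, short_side)                           # multiscale_mode='range'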
DSLA-DSLA
DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_6.4gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_6.4gf')),
    neck=dict(
        type='FPN',
        in_channels=[168, 392, 784, 1624],
        out_channels=256,
        num_outs=5))
522
28.055556
73
py
DSLA-DSLA
DSLA-DSLA/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_1.6gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
    neck=dict(
        type='FPN',
        in_channels=[72, 168, 408, 912],
        out_channels=256,
        num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
760
27.185185
73
py
DSLA-DSLA
DSLA-DSLA/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_800mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 288, 672],
        out_channels=256,
        num_outs=5))
534
28.722222
73
py
DSLA-DSLA
DSLA-DSLA/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        type='DynamicRoIHead',
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(nms=dict(iou_threshold=0.85)),
        rcnn=dict(
            dynamic_rcnn=dict(
                iou_topk=75,
                beta_topk=10,
                update_iter_interval=100,
                initial_iou=0.4,
                initial_beta=1.0))),
    test_cfg=dict(rpn=dict(nms=dict(iou_threshold=0.85))))
1,051
35.275862
77
py
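A conceptual sketch, not mmdet's DynamicRoIHead code, of what the dynamic_rcnn settings above control: every update_iter_interval iterations the positive-IoU threshold is re-estimated from the statistics of recent proposals, starting from initial_iou; beta_topk drives an analogous update of the SmoothL1 beta.

import numpy as np

iou_threshold = 0.4          # initial_iou
iou_history = []             # per-image statistic: the iou_topk-th largest IoU

for iteration in range(1, 301):
    proposal_ious = np.random.rand(512)              # stand-in for one image's proposal IoUs
    iou_history.append(np.sort(proposal_ious)[-75])  # iou_topk = 75
    if iteration % 100 == 0:                         # update_iter_interval
        iou_threshold = float(np.mean(iou_history))
        iou_history = []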
DSLA-DSLA
DSLA-DSLA/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        frozen_stages=0,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
data = dict(train=dict(pipeline=train_pipeline))
1,072
31.515152
78
py
DSLA-DSLA
DSLA-DSLA/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        frozen_stages=0,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
418
28.928571
78
py
DSLA-DSLA
DSLA-DSLA/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        frozen_stages=0,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
416
28.785714
76
py
DSLA-DSLA
DSLA-DSLA/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        frozen_stages=0,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
data = dict(train=dict(pipeline=train_pipeline))
1,070
31.454545
77
py
DSLA-DSLA
DSLA-DSLA/configs/wider_face/ssd300_wider_face.py
_base_ = [
    '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py',
    '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=1))
# optimizer
optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[16, 20])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)
log_config = dict(interval=1)
517
26.263158
71
py
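A minimal inference sketch for the WIDER FACE config above, assuming mmdet is installed; the checkpoint and image paths are placeholders, not files shipped with this repository.

from mmdet.apis import inference_detector, init_detector

config_file = 'configs/wider_face/ssd300_wider_face.py'
checkpoint_file = 'ssd300_wider_face.pth'            # placeholder checkpoint path
model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo/face.jpg')  # placeholder image path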
DSLA-DSLA
DSLA-DSLA/configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeSt',
        stem_channels=64,
        depth=50,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=norm_cfg)))
# # use ResNeSt img_norm
img_norm_cfg = dict(
    mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=False,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,947
29.920635
79
py
DSLA-DSLA
DSLA-DSLA/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
_base_ = './faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py'
model = dict(
    backbone=dict(
        stem_channels=128,
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='open-mmlab://resnest101')))
261
31.75
78
py
DSLA-DSLA
DSLA-DSLA/configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeSt',
        stem_channels=64,
        depth=50,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
    roi_head=dict(
        bbox_head=[
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared4Conv1FCBBoxHead',
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                norm_cfg=norm_cfg,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0))
        ],
        mask_head=dict(norm_cfg=norm_cfg)))
# # use ResNeSt img_norm
img_norm_cfg = dict(
    mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
4,255
34.764706
79
py
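The three cascade stages above use progressively smaller target_stds because later stages refine already-accurate proposals, so the raw regression deltas shrink and the smaller stds rescale them back to a trainable range. A simplified sketch of the DeltaXYWHBBoxCoder encoding (means and clamping omitted, boxes given as center/width/height):

import numpy as np

def encode(proposal, gt, stds):
    px, py, pw, ph = proposal
    gx, gy, gw, gh = gt
    deltas = np.array([(gx - px) / pw, (gy - py) / ph,
                       np.log(gw / pw), np.log(gh / ph)])
    return deltas / np.array(stds)

proposal = (100., 100., 60., 40.)
gt = (104., 102., 66., 44.)
print(encode(proposal, gt, [0.1, 0.1, 0.2, 0.2]))          # stage 1
print(encode(proposal, gt, [0.033, 0.033, 0.067, 0.067]))  # stage 3: larger targets for the same residual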
DSLA-DSLA
DSLA-DSLA/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNeSt',
        stem_channels=64,
        depth=50,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=norm_cfg),
        mask_head=dict(norm_cfg=norm_cfg)))
# # use ResNeSt img_norm
img_norm_cfg = dict(
    mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
2,068
30.830769
79
py