Dataset schema (one row per file; ranges are min–max over the dump):

    repo             string   length 2–99
    file             string   length 13–225
    code             string   length 0–18.3M
    file_length      int64    0–18.3M
    avg_line_length  float64  0–1.36M
    max_line_length  int64    0–4.26M
    extension_type   string   1 distinct value
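The numeric columns are evidently derived from `code`. A minimal sketch of how they could be computed; the exact definitions (e.g. whether newline characters count toward `file_length`) are an assumption of this note, not documented in the dump:

def line_stats(code: str) -> dict:
    # Hypothetical reconstruction of the per-file statistics columns.
    lengths = [len(line) for line in code.splitlines()]
    return {
        'file_length': len(code),  # total characters in the file
        'avg_line_length': sum(lengths) / len(lengths) if lengths else 0,
        'max_line_length': max(lengths, default=0),
    }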
ERD | ERD-main/configs/cascade_rcnn/cascade-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py

_base_ = './cascade-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 216 | avg_line_length: 26.125 | max_line_length: 61 | extension_type: py
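Every row below follows the same pattern: a short MMDetection config that names one or more `_base_` files and overrides a few keys; MMEngine merges the base config(s) first and then applies the local dict on top. A minimal sketch of inspecting the merged result, assuming `mmengine` is installed and the script runs from the ERD-main repo root:

from mmengine.config import Config

# `_base_` files are resolved and merged recursively; the local
# overrides (here: backbone depth 101 and its checkpoint) win.
cfg = Config.fromfile(
    'configs/cascade_rcnn/cascade-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py')
print(cfg.model.backbone.depth)                # 101, from the override
print(cfg.model.backbone.init_cfg.checkpoint)  # torchvision://resnet101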
ERD | ERD-main/configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_ms-3x_coco.py

_base_ = [
    '../common/ms_3x_coco-instance.py',
    '../_base_/models/cascade-mask-rcnn_r50_fpn.py'
]

file_length: 105 | avg_line_length: 20.2 | max_line_length: 51 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-mask-rcnn_r101_fpn_ms-3x_coco.py

_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 208 | avg_line_length: 28.857143 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_ms-3x_coco.py

_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

file_length: 430 | avg_line_length: 27.733333 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_20e_coco.py

_base_ = [
    '../_base_/models/cascade-mask-rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py'
]

file_length: 183 | avg_line_length: 29.666667 | max_line_length: 73 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py

_base_ = './cascade-mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

file_length: 427 | avg_line_length: 27.533333 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_ms-3x_coco.py

_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

file_length: 430 | avg_line_length: 27.733333 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-mask-rcnn_r101_fpn_1x_coco.py

_base_ = './cascade-mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 205 | avg_line_length: 28.428571 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-mask-rcnn_r50-caffe_fpn_ms-3x_coco.py

_base_ = [
    '../common/ms_3x_coco-instance.py',
    '../_base_/models/cascade-mask-rcnn_r50_fpn.py'
]
model = dict(
    # use caffe img_norm
    data_preprocessor=dict(
        mean=[103.530, 116.280, 123.675],
        std=[1.0, 1.0, 1.0],
        bgr_to_rgb=False),
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))

file_length: 502 | avg_line_length: 25.473684 | max_line_length: 66 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-rcnn_x101-32x4d_fpn_1x_coco.py

_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

file_length: 422 | avg_line_length: 27.2 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-mask-rcnn_x101-64x4d_fpn_20e_coco.py

_base_ = './cascade-mask-rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

file_length: 428 | avg_line_length: 27.6 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-rcnn_x101-64x4d_fpn_1x_coco.py

_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

file_length: 446 | avg_line_length: 26.9375 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-mask-rcnn_x101-32x8d_fpn_ms-3x_coco.py

_base_ = './cascade-mask-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
    # ResNeXt-101-32x8d model trained with Caffe2 at FB,
    # so the mean and std need to be changed.
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[103.530, 116.280, 123.675],
        std=[57.375, 57.120, 58.395],
        bgr_to_rgb=False,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=8,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnext101_32x8d')))

file_length: 758 | avg_line_length: 29.36 | max_line_length: 68 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-rcnn_r50-caffe_fpn_1x_coco.py

_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
    # use caffe img_norm
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[103.530, 116.280, 123.675],
        std=[1.0, 1.0, 1.0],
        bgr_to_rgb=False,
        pad_size_divisor=32),
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))

file_length: 483 | avg_line_length: 27.470588 | max_line_length: 66 | extension_type: py
ERD | ERD-main/configs/cascade_rcnn/cascade-rcnn_r101_fpn_1x_coco.py

_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 200 | avg_line_length: 27.714286 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/nas_fcos/nas-fcos_r50-caffe_fpn_fcoshead-gn-head_4xb4-1x_coco.py

_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# model settings
model = dict(
    type='NASFCOS',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[103.530, 116.280, 123.675],
        std=[1.0, 1.0, 1.0],
        bgr_to_rgb=False,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False, eps=0),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=dict(
        type='NASFCOS_FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5,
        norm_cfg=dict(type='BN'),
        conv_cfg=dict(type='DCNv2', deform_groups=2)),
    bbox_head=dict(
        type='FCOSHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        norm_cfg=dict(type='GN', num_groups=32),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='IoULoss', loss_weight=1.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# dataset settings
train_dataloader = dict(batch_size=4, num_workers=2)

# optimizer
optim_wrapper = dict(
    optimizer=dict(lr=0.01),
    paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))

file_length: 2,179 | avg_line_length: 27.684211 | max_line_length: 73 | extension_type: py
ERD | ERD-main/configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py

_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# model settings
model = dict(
    type='NASFCOS',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[103.530, 116.280, 123.675],
        std=[1.0, 1.0, 1.0],
        bgr_to_rgb=False,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False, eps=0),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=dict(
        type='NASFCOS_FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5,
        norm_cfg=dict(type='BN'),
        conv_cfg=dict(type='DCNv2', deform_groups=2)),
    bbox_head=dict(
        type='NASFCOSHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        norm_cfg=dict(type='GN', num_groups=32),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='IoULoss', loss_weight=1.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# dataset settings
train_dataloader = dict(batch_size=4, num_workers=2)

# optimizer
optim_wrapper = dict(
    optimizer=dict(lr=0.01),
    paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))

file_length: 2,157 | avg_line_length: 27.773333 | max_line_length: 73 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_r50-caffe_fpn_1x_coco.py

_base_ = './rpn_r50_fpn_1x_coco.py'
# use caffe img_norm
model = dict(
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[103.530, 116.280, 123.675],
        std=[1.0, 1.0, 1.0],
        bgr_to_rgb=False,
        pad_size_divisor=32),
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))

file_length: 493 | avg_line_length: 28.058824 | max_line_length: 66 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_x101-64x4d_fpn_1x_coco.py

_base_ = './rpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

file_length: 413 | avg_line_length: 26.6 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_r50_fpn_1x_coco.py

_base_ = [
    '../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator

# inference on val dataset and dump the proposals with evaluate metric
# data_root = 'data/coco/'
# test_evaluator = [
#     dict(
#         type='DumpProposals',
#         output_dir=data_root + 'proposals/',
#         proposals_file='rpn_r50_fpn_1x_val2017.pkl'),
#     dict(
#         type='CocoMetric',
#         ann_file=data_root + 'annotations/instances_val2017.json',
#         metric='proposal_fast',
#         backend_args={{_base_.backend_args}},
#         format_only=False)
# ]

# inference on training dataset and dump the proposals without evaluate metric
# data_root = 'data/coco/'
# test_dataloader = dict(
#     dataset=dict(
#         ann_file='annotations/instances_train2017.json',
#         data_prefix=dict(img='train2017/')))
#
# test_evaluator = [
#     dict(
#         type='DumpProposals',
#         output_dir=data_root + 'proposals/',
#         proposals_file='rpn_r50_fpn_1x_train2017.pkl'),
# ]

file_length: 1,169 | avg_line_length: 30.621622 | max_line_length: 78 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_x101-64x4d_fpn_2x_coco.py

_base_ = './rpn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

file_length: 413 | avg_line_length: 26.6 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_x101-32x4d_fpn_1x_coco.py

_base_ = './rpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

file_length: 413 | avg_line_length: 26.6 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_r101_fpn_1x_coco.py

_base_ = './rpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 191 | avg_line_length: 26.428571 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_r101-caffe_fpn_1x_coco.py

_base_ = './rpn_r50-caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))

file_length: 216 | avg_line_length: 26.125 | max_line_length: 67 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_r50_fpn_2x_coco.py

_base_ = './rpn_r50_fpn_1x_coco.py'

# learning policy
max_epochs = 24
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[16, 22],
        gamma=0.1)
]

file_length: 422 | avg_line_length: 22.5 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_x101-32x4d_fpn_2x_coco.py

_base_ = './rpn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

file_length: 413 | avg_line_length: 26.6 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_r50-caffe-c4_1x_coco.py

_base_ = [
    '../_base_/models/rpn_r50-caffe-c4.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator

file_length: 251 | avg_line_length: 27 | max_line_length: 72 | extension_type: py
ERD | ERD-main/configs/rpn/rpn_r101_fpn_2x_coco.py

_base_ = './rpn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 191 | avg_line_length: 26.428571 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco.py

_base_ = 'deformable-detr_r50_16xb2-50e_coco.py'
model = dict(with_box_refine=True)

file_length: 84 | avg_line_length: 27.333333 | max_line_length: 48 | extension_type: py
ERD | ERD-main/configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py

_base_ = [
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
    type='DeformableDETR',
    num_queries=300,
    num_feature_levels=4,
    with_box_refine=False,
    as_two_stage=False,
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=1),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='ChannelMapper',
        in_channels=[512, 1024, 2048],
        kernel_size=1,
        out_channels=256,
        act_cfg=None,
        norm_cfg=dict(type='GN', num_groups=32),
        num_outs=4),
    encoder=dict(  # DeformableDetrTransformerEncoder
        num_layers=6,
        layer_cfg=dict(  # DeformableDetrTransformerEncoderLayer
            self_attn_cfg=dict(  # MultiScaleDeformableAttention
                embed_dims=256,
                batch_first=True),
            ffn_cfg=dict(
                embed_dims=256, feedforward_channels=1024, ffn_drop=0.1))),
    decoder=dict(  # DeformableDetrTransformerDecoder
        num_layers=6,
        return_intermediate=True,
        layer_cfg=dict(  # DeformableDetrTransformerDecoderLayer
            self_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1,
                batch_first=True),
            cross_attn_cfg=dict(  # MultiScaleDeformableAttention
                embed_dims=256,
                batch_first=True),
            ffn_cfg=dict(
                embed_dims=256, feedforward_channels=1024, ffn_drop=0.1)),
        post_norm_cfg=None),
    positional_encoding=dict(num_feats=128, normalize=True, offset=-0.5),
    bbox_head=dict(
        type='DeformableDETRHead',
        num_classes=80,
        sync_cls_avg_factor=True,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=2.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='HungarianAssigner',
            match_costs=[
                dict(type='FocalLossCost', weight=2.0),
                dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
                dict(type='IoUCost', iou_mode='giou', weight=2.0)
            ])),
    test_cfg=dict(max_per_img=100))

# train_pipeline: NOTE the img_scale and the Pad's size_divisor differ
# from the default settings in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomChoice',
        transforms=[
            [
                dict(
                    type='RandomChoiceResize',
                    scales=[(480, 1333), (512, 1333), (544, 1333),
                            (576, 1333), (608, 1333), (640, 1333),
                            (672, 1333), (704, 1333), (736, 1333),
                            (768, 1333), (800, 1333)],
                    keep_ratio=True)
            ],
            [
                dict(
                    type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7;
                    # follow the original implementation.
                    scales=[(400, 4200), (500, 4200), (600, 4200)],
                    keep_ratio=True),
                dict(
                    type='RandomCrop',
                    crop_type='absolute_range',
                    crop_size=(384, 600),
                    allow_negative_crop=True),
                dict(
                    type='RandomChoiceResize',
                    scales=[(480, 1333), (512, 1333), (544, 1333),
                            (576, 1333), (608, 1333), (640, 1333),
                            (672, 1333), (704, 1333), (736, 1333),
                            (768, 1333), (800, 1333)],
                    keep_ratio=True)
            ]
        ]),
    dict(type='PackDetInputs')
]
train_dataloader = dict(
    dataset=dict(
        filter_cfg=dict(filter_empty_gt=False), pipeline=train_pipeline))

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=0.0002, weight_decay=0.0001),
    clip_grad=dict(max_norm=0.1, norm_type=2),
    paramwise_cfg=dict(
        custom_keys={
            'backbone': dict(lr_mult=0.1),
            'sampling_offsets': dict(lr_mult=0.1),
            'reference_points': dict(lr_mult=0.1)
        }))

# learning policy
max_epochs = 50
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
param_scheduler = [
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[40],
        gamma=0.1)
]

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)

file_length: 5,467 | avg_line_length: 33.828025 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco.py

_base_ = 'deformable-detr-refine_r50_16xb2-50e_coco.py'
model = dict(as_two_stage=True)

file_length: 88 | avg_line_length: 28.666667 | max_line_length: 55 | extension_type: py
ERD | ERD-main/configs/boxinst/boxinst_r50_fpn_ms-90k_coco.py

_base_ = '../common/ms-90k_coco.py'

# model settings
model = dict(
    type='BoxInst',
    data_preprocessor=dict(
        type='BoxInstDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32,
        mask_stride=4,
        pairwise_size=3,
        pairwise_dilation=2,
        pairwise_color_thresh=0.3,
        bottom_pixels_removed=10),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',  # use P5
        num_outs=5,
        relu_before_extra_convs=True),
    bbox_head=dict(
        type='BoxInstBboxHead',
        num_params=593,
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        norm_on_bbox=True,
        centerness_on_reg=True,
        dcn_on_last_conv=False,
        center_sampling=True,
        conv_bias=True,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    mask_head=dict(
        type='BoxInstMaskHead',
        num_layers=3,
        feat_channels=16,
        size_of_interest=8,
        mask_out_stride=4,
        topk_masks_per_img=64,
        mask_feature_head=dict(
            in_channels=256,
            feat_channels=128,
            start_level=0,
            end_level=2,
            out_channels=16,
            mask_stride=8,
            num_stacked_convs=4,
            norm_cfg=dict(type='BN', requires_grad=True)),
        loss_mask=dict(
            type='DiceLoss',
            use_sigmoid=True,
            activate=True,
            eps=5e-6,
            loss_weight=1.0)),
    # model training and testing settings
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100,
        mask_thr=0.5))

# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))

# evaluator
val_evaluator = dict(metric=['bbox', 'segm'])
test_evaluator = val_evaluator

file_length: 2,693 | avg_line_length: 27.659574 | max_line_length: 78 | extension_type: py
ERD | ERD-main/configs/boxinst/boxinst_r101_fpn_ms-90k_coco.py

_base_ = './boxinst_r50_fpn_ms-90k_coco.py'

# model settings
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 217 | avg_line_length: 23.222222 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/res2net/htc_res2net-101_fpn_20e_coco.py

_base_ = '../htc/htc_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))

file_length: 276 | avg_line_length: 24.181818 | max_line_length: 62 | extension_type: py
ERD | ERD-main/configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py

_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))

file_length: 291 | avg_line_length: 25.545455 | max_line_length: 62 | extension_type: py
ERD | ERD-main/configs/res2net/mask-rcnn_res2net-101_fpn_2x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))

file_length: 287 | avg_line_length: 25.181818 | max_line_length: 62 | extension_type: py
ERD | ERD-main/configs/res2net/cascade-rcnn_res2net-101_fpn_20e_coco.py

_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))

file_length: 294 | avg_line_length: 25.818182 | max_line_length: 62 | extension_type: py
ERD | ERD-main/configs/res2net/cascade-mask-rcnn_res2net-101_fpn_20e_coco.py

_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))

file_length: 299 | avg_line_length: 26.272727 | max_line_length: 64 | extension_type: py
ERD | ERD-main/configs/lvis/mask-rcnn_r101_fpn_sample1e-3_ms-2x_lvis-v0.5.py

_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 216 | avg_line_length: 30 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/lvis/mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-1x_lvis-v1.py

_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

file_length: 436 | avg_line_length: 28.133333 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/lvis/mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py

_base_ = [
    '../_base_/models/mask-rcnn_r50_fpn.py',
    '../_base_/datasets/lvis_v0.5_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=1230), mask_head=dict(num_classes=1230)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))

file_length: 424 | avg_line_length: 29.357143 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/lvis/mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-1x_lvis-v1.py

_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

file_length: 436 | avg_line_length: 28.133333 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/lvis/mask-rcnn_x101-32x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py

_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

file_length: 438 | avg_line_length: 28.266667 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/lvis/mask-rcnn_x101-64x4d_fpn_sample1e-3_ms-2x_lvis-v0.5.py

_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

file_length: 438 | avg_line_length: 28.266667 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/lvis/mask-rcnn_r101_fpn_sample1e-3_ms-1x_lvis-v1.py

_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 214 | avg_line_length: 29.714286 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/lvis/mask-rcnn_r50_fpn_sample1e-3_ms-1x_lvis-v1.py

_base_ = [
    '../_base_/models/mask-rcnn_r50_fpn.py',
    '../_base_/datasets/lvis_v1_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))

file_length: 422 | avg_line_length: 29.214286 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/yolof/yolof_r50-c5_8xb8-1x_coco.py

_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='YOLOF',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[103.530, 116.280, 123.675],
        std=[1.0, 1.0, 1.0],
        bgr_to_rgb=False,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(3, ),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron/resnet50_caffe')),
    neck=dict(
        type='DilatedEncoder',
        in_channels=2048,
        out_channels=512,
        block_mid_channels=128,
        num_residual_blocks=4,
        block_dilations=[2, 4, 6, 8]),
    bbox_head=dict(
        type='YOLOFHead',
        num_classes=80,
        in_channels=512,
        reg_decoded_bbox=True,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[1, 2, 4, 8, 16],
            strides=[32]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1., 1., 1., 1.],
            add_ctr_clamp=True,
            ctr_clamp=32),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# optimizer
optim_wrapper = dict(
    optimizer=dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=0.0001),
    paramwise_cfg=dict(
        norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))

# learning rate
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=0.00066667,
        by_epoch=False,
        begin=0,
        end=1500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=12,
        by_epoch=True,
        milestones=[8, 11],
        gamma=0.1)
]

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='RandomShift', prob=0.5, max_shift_px=32),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=8, num_workers=8, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)

file_length: 3,591 | avg_line_length: 29.700855 | max_line_length: 77 | extension_type: py
ERD | ERD-main/configs/yolof/yolof_r50-c5_8xb8-iter-1x_coco.py

_base_ = './yolof_r50-c5_8xb8-1x_coco.py'

# The iter-based config is implemented according to the source code.
# COCO has 117266 images after filtering. With 8 GPUs and a batch size
# of 8 per GPU, 22500 iters is equivalent to 22500/(117266/(8x8)) = 12.3
# epochs, 15000 iters to 8.2 epochs, and 20000 iters to 10.9 epochs.
# Because the lr (0.12) is large, the iter-based and epoch-based settings
# differ by about 0.2 in the mAP evaluation value.
train_cfg = dict(
    _delete_=True,
    type='IterBasedTrainLoop',
    max_iters=22500,
    val_interval=4500)

# learning rate policy
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=22500,
        by_epoch=False,
        milestones=[15000, 20000],
        gamma=0.1)
]
train_dataloader = dict(sampler=dict(type='InfiniteSampler'))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=2500))
log_processor = dict(by_epoch=False)

file_length: 1,030 | avg_line_length: 30.242424 | max_line_length: 79 | extension_type: py
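The epoch equivalences quoted in the comment above can be checked directly: with 117266 filtered COCO images and a total batch of 8 GPUs × 8 samples = 64, one epoch is roughly 1832 iterations:

total_batch = 8 * 8                     # 8 GPUs x 8 samples per GPU
iters_per_epoch = 117266 / total_batch  # ~1832.3
print(22500 / iters_per_epoch)          # ~12.3 epochs (max_iters)
print(15000 / iters_per_epoch)          # ~8.2 epochs (first LR drop)
print(20000 / iters_per_epoch)          # ~10.9 epochs (second LR drop)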
ERD | ERD-main/configs/gcnet/mask-rcnn_r101-gcb-r4-c3-c5_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 4),
            stages=(False, True, True, True),
            position='after_conv3')
    ]))

file_length: 257 | avg_line_length: 27.666667 | max_line_length: 56 | extension_type: py
ERD | ERD-main/configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5_fpn_1x_coco.py

_base_ = '../dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

file_length: 183 | avg_line_length: 35.8 | max_line_length: 75 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_r101-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

file_length: 369 | avg_line_length: 29.833333 | max_line_length: 60 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

file_length: 369 | avg_line_length: 29.833333 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_r101-syncbn_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

file_length: 163 | avg_line_length: 31.8 | max_line_length: 75 | extension_type: py
ERD | ERD-main/configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r16-gcb-c3-c5_fpn_1x_coco.py

_base_ = '../dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

file_length: 390 | avg_line_length: 31.583333 | max_line_length: 73 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_r50-syncbn_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

file_length: 162 | avg_line_length: 31.6 | max_line_length: 75 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_x101-32x4d-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

file_length: 375 | avg_line_length: 30.333333 | max_line_length: 60 | extension_type: py
ERD | ERD-main/configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py

_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

file_length: 180 | avg_line_length: 35.2 | max_line_length: 75 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_r50-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

file_length: 368 | avg_line_length: 29.75 | max_line_length: 60 | extension_type: py
ERD | ERD-main/configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-r16-gcb-c3-c5_fpn_1x_coco.py

_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

file_length: 387 | avg_line_length: 31.333333 | max_line_length: 70 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_x101-32x4d-syncbn_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False))

file_length: 169 | avg_line_length: 33 | max_line_length: 75 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_x101-32x4d-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

file_length: 376 | avg_line_length: 30.416667 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-dconv-c3-c5-r4-gcb-c3-c5_fpn_1x_coco.py

_base_ = '../dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

file_length: 389 | avg_line_length: 31.5 | max_line_length: 73 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_r50-gcb-r16-c3-c5_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            stages=(False, True, True, True),
            position='after_conv3')
    ]))

file_length: 257 | avg_line_length: 27.666667 | max_line_length: 57 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_r101-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

file_length: 370 | avg_line_length: 29.916667 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/gcnet/cascade-mask-rcnn_x101-32x4d-syncbn-r4-gcb-c3-c5_fpn_1x_coco.py

_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 4),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))

file_length: 386 | avg_line_length: 31.25 | max_line_length: 70 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_r101-gcb-r16-c3-c5_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            stages=(False, True, True, True),
            position='after_conv3')
    ]))

file_length: 258 | avg_line_length: 27.777778 | max_line_length: 57 | extension_type: py
ERD | ERD-main/configs/gcnet/mask-rcnn_r50-gcb-r4-c3-c5_fpn_1x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 4),
            stages=(False, True, True, True),
            position='after_conv3')
    ]))

file_length: 256 | avg_line_length: 27.555556 | max_line_length: 56 | extension_type: py
ERD | ERD-main/configs/instaboost/mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco.py

_base_ = './mask-rcnn_r50_fpn_instaboost-4x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

file_length: 430 | avg_line_length: 27.733333 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/instaboost/cascade-mask-rcnn_x101-64x4d_fpn_instaboost-4x_coco.py

_base_ = './cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))

file_length: 438 | avg_line_length: 28.266667 | max_line_length: 76 | extension_type: py
ERD | ERD-main/configs/instaboost/cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py

_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(
        type='InstaBoost',
        action_candidate=('normal', 'horizontal', 'skip'),
        action_prob=(1, 0, 0),
        scale=(0.8, 1.2),
        dx=15,
        dy=15,
        theta=(-1, 1),
        color_prob=0.5,
        hflag=False,
        aug_ratio=0.5),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))

max_epochs = 48
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[32, 44],
        gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)

# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))

file_length: 1,106 | avg_line_length: 26 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/instaboost/mask-rcnn_r101_fpn_instaboost-4x_coco.py

_base_ = './mask-rcnn_r50_fpn_instaboost-4x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 208 | avg_line_length: 28.857143 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/instaboost/cascade-mask-rcnn_r101_fpn_instaboost-4x_coco.py

_base_ = './cascade-mask-rcnn_r50_fpn_instaboost-4x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 217 | avg_line_length: 26.25 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py

_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(
        type='InstaBoost',
        action_candidate=('normal', 'horizontal', 'skip'),
        action_prob=(1, 0, 0),
        scale=(0.8, 1.2),
        dx=15,
        dy=15,
        theta=(-1, 1),
        color_prob=0.5,
        hflag=False,
        aug_ratio=0.5),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))

max_epochs = 48
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[32, 44],
        gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)

# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))

file_length: 1,095 | avg_line_length: 25.731707 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/detr/detr_r18_8xb2-500e_coco.py

_base_ = './detr_r50_8xb2-500e_coco.py'
model = dict(
    backbone=dict(
        depth=18,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
    neck=dict(in_channels=[512]))

file_length: 206 | avg_line_length: 24.875 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/detr/detr_r50_8xb2-500e_coco.py

_base_ = './detr_r50_8xb2-150e_coco.py'

# learning policy
max_epochs = 500
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=10)
param_scheduler = [
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[334],
        gamma=0.1)
]

# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)

file_length: 613 | avg_line_length: 23.56 | max_line_length: 71 | extension_type: py
ERD | ERD-main/configs/detr/detr_r50_8xb2-150e_coco.py

_base_ = [
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
    type='DETR',
    num_queries=100,
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=1),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(3, ),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='ChannelMapper',
        in_channels=[2048],
        kernel_size=1,
        out_channels=256,
        act_cfg=None,
        norm_cfg=None,
        num_outs=1),
    encoder=dict(  # DetrTransformerEncoder
        num_layers=6,
        layer_cfg=dict(  # DetrTransformerEncoderLayer
            self_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1,
                batch_first=True),
            ffn_cfg=dict(
                embed_dims=256,
                feedforward_channels=2048,
                num_fcs=2,
                ffn_drop=0.1,
                act_cfg=dict(type='ReLU', inplace=True)))),
    decoder=dict(  # DetrTransformerDecoder
        num_layers=6,
        layer_cfg=dict(  # DetrTransformerDecoderLayer
            self_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1,
                batch_first=True),
            cross_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1,
                batch_first=True),
            ffn_cfg=dict(
                embed_dims=256,
                feedforward_channels=2048,
                num_fcs=2,
                ffn_drop=0.1,
                act_cfg=dict(type='ReLU', inplace=True))),
        return_intermediate=True),
    positional_encoding=dict(num_feats=128, normalize=True),
    bbox_head=dict(
        type='DETRHead',
        num_classes=80,
        embed_dims=256,
        loss_cls=dict(
            type='CrossEntropyLoss',
            bg_cls_weight=0.1,
            use_sigmoid=False,
            loss_weight=1.0,
            class_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='HungarianAssigner',
            match_costs=[
                dict(type='ClassificationCost', weight=1.),
                dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
                dict(type='IoUCost', iou_mode='giou', weight=2.0)
            ])),
    test_cfg=dict(max_per_img=100))

# train_pipeline: NOTE the img_scale and the Pad's size_divisor differ
# from the default settings in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomChoice',
        transforms=[[
            dict(
                type='RandomChoiceResize',
                scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                        (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                        (736, 1333), (768, 1333), (800, 1333)],
                keep_ratio=True)
        ],
                    [
                        dict(
                            type='RandomChoiceResize',
                            scales=[(400, 1333), (500, 1333), (600, 1333)],
                            keep_ratio=True),
                        dict(
                            type='RandomCrop',
                            crop_type='absolute_range',
                            crop_size=(384, 600),
                            allow_negative_crop=True),
                        dict(
                            type='RandomChoiceResize',
                            scales=[(480, 1333), (512, 1333), (544, 1333),
                                    (576, 1333), (608, 1333), (640, 1333),
                                    (672, 1333), (704, 1333), (736, 1333),
                                    (768, 1333), (800, 1333)],
                            keep_ratio=True)
                    ]]),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
    clip_grad=dict(max_norm=0.1, norm_type=2),
    paramwise_cfg=dict(
        custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))

# learning policy
max_epochs = 150
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
param_scheduler = [
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[100],
        gamma=0.1)
]

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)

file_length: 5,433 | avg_line_length: 33.833333 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/detr/detr_r101_8xb2-500e_coco.py

_base_ = './detr_r50_8xb2-500e_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 196 | avg_line_length: 23.625 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/atss/atss_r18_fpn_8xb8-amp-lsj-200e_coco.py

_base_ = './atss_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
    backbone=dict(
        depth=18,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
    neck=dict(in_channels=[64, 128, 256, 512]))

file_length: 232 | avg_line_length: 28.125 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/atss/atss_r50_fpn_8xb8-amp-lsj-200e_coco.py

_base_ = '../common/lsj-200e_coco-detection.py'

image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]

model = dict(
    type='ATSS',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32,
        batch_augments=batch_augments),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

train_dataloader = dict(batch_size=8, num_workers=4)

# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
    type='AmpOptimWrapper',
    optimizer=dict(
        type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004))

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)

file_length: 2,536 | avg_line_length: 29.939024 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/atss/atss_r101_fpn_1x_coco.py

_base_ = './atss_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 192 | avg_line_length: 26.571429 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/atss/atss_r50_fpn_1x_coco.py

_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# model settings
model = dict(
    type='ATSS',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# optimizer
optim_wrapper = dict(
    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))

file_length: 2,164 | avg_line_length: 29.069444 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/atss/atss_r101_fpn_8xb8-amp-lsj-200e_coco.py

_base_ = './atss_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))

file_length: 208 | avg_line_length: 25.125 | max_line_length: 61 | extension_type: py
ERD | ERD-main/configs/ld/ld_r34-gflv1-r101_fpn_1x_coco.py

_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']
model = dict(
    backbone=dict(
        type='ResNet',
        depth=34,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet34')),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 256, 512],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5))

file_length: 569 | avg_line_length: 27.5 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/ld/ld_r50-gflv1-r101_fpn_1x_coco.py

_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']
model = dict(
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5))

file_length: 572 | avg_line_length: 27.65 | max_line_length: 79 | extension_type: py
ERD | ERD-main/configs/ld/ld_r18-gflv1-r101_fpn_1x_coco.py

_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth'  # noqa
model = dict(
    type='KnowledgeDistillationSingleStageDetector',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    teacher_config='configs/gfl/gfl_r101_fpn_ms-2x_coco.py',
    teacher_ckpt=teacher_ckpt,
    backbone=dict(
        type='ResNet',
        depth=18,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 256, 512],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='LDHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
        loss_ld=dict(
            type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10),
        reg_max=16,
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))

file_length: 2,361 | avg_line_length: 32.267606 | max_line_length: 163 | extension_type: py
ERD | ERD-main/configs/ld/ld_r101-gflv1-r101-dcn_fpn_2x_coco.py

_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth'  # noqa
model = dict(
    teacher_config='configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py',
    teacher_ckpt=teacher_ckpt,
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5))

max_epochs = 24
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[16, 22],
        gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)

# multi-scale training
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomResize', scale=[(1333, 480), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))

file_length: 1,608 | avg_line_length: 31.18 | max_line_length: 187 | extension_type: py
ERD
ERD-main/configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] # model settings data_preprocessor = dict( type='DetDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], bgr_to_rgb=True, pad_size_divisor=32) model = dict( type='YOLOV3', data_preprocessor=data_preprocessor, backbone=dict( type='MobileNetV2', out_indices=(2, 4, 6), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')), neck=dict( type='YOLOV3Neck', num_scales=3, in_channels=[320, 96, 32], out_channels=[96, 96, 96]), bbox_head=dict( type='YOLOV3Head', num_classes=80, in_channels=[96, 96, 96], out_channels=[96, 96, 96], anchor_generator=dict( type='YOLOAnchorGenerator', base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]], strides=[32, 16, 8]), bbox_coder=dict(type='YOLOBBoxCoder'), featmap_strides=[32, 16, 8], loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_conf=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_xy=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=2.0, reduction='sum'), loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), # training and testing settings train_cfg=dict( assigner=dict( type='GridAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0)), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, conf_thr=0.005, nms=dict(type='nms', iou_threshold=0.45), max_per_img=100)) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' # Example to use different file client # Method 1: simply set the data root and let the file I/O module # automatically infer from prefix (not support LMDB and Memcache yet) # data_root = 's3://openmmlab/datasets/detection/coco/' # Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 # backend_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) backend_args = None train_pipeline = [ dict(type='LoadImageFromFile', backend_args=backend_args), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=data_preprocessor['mean'], to_rgb=data_preprocessor['bgr_to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True), dict(type='RandomFlip', prob=0.5), dict(type='PhotoMetricDistortion'), dict(type='PackDetInputs') ] test_pipeline = [ dict(type='LoadImageFromFile', backend_args=backend_args), dict(type='Resize', scale=(416, 416), keep_ratio=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ] train_dataloader = dict( batch_size=24, num_workers=4, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=True), batch_sampler=dict(type='AspectRatioBatchSampler'), dataset=dict( type='RepeatDataset', # use RepeatDataset to speed up training times=10, dataset=dict( type=dataset_type, data_root=data_root, ann_file='annotations/instances_train2017.json', data_prefix=dict(img='train2017/'), filter_cfg=dict(filter_empty_gt=True, min_size=32), pipeline=train_pipeline, backend_args=backend_args))) val_dataloader = dict( batch_size=24, num_workers=4, persistent_workers=True, 
drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, data_root=data_root, ann_file='annotations/instances_val2017.json', data_prefix=dict(img='val2017/'), test_mode=True, pipeline=test_pipeline, backend_args=backend_args)) test_dataloader = val_dataloader val_evaluator = dict( type='CocoMetric', ann_file=data_root + 'annotations/instances_val2017.json', metric='bbox', backend_args=backend_args) test_evaluator = val_evaluator train_cfg = dict(max_epochs=30) # optimizer optim_wrapper = dict( type='OptimWrapper', optimizer=dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005), clip_grad=dict(max_norm=35, norm_type=2)) # learning policy param_scheduler = [ dict( type='LinearLR', start_factor=0.0001, by_epoch=False, begin=0, end=4000), dict(type='MultiStepLR', by_epoch=True, milestones=[24, 28], gamma=0.1) ] find_unused_parameters = True # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (24 samples per GPU) auto_scale_lr = dict(base_batch_size=192)
5645
30.898305
79
py
ERD
ERD-main/configs/yolo/yolov3_d53_8xb8-320-273e_coco.py
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'

input_size = (320, 320)
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as those in `preprocess_cfg`
    dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', scale=input_size, keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=input_size, keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
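# `{{_base_.backend_args}}` is MMEngine config interpolation: it is replaced
# at parse time by the `backend_args` value defined in the `_base_` config,
# so the pipeline stays in sync with the base file's storage backend.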
1157
37.6
73
py
ERD
ERD-main/configs/yolo/yolov3_d53_8xb8-amp-ms-608-273e_coco.py
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'

# fp16 settings
optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic')
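# MMEngine's `AmpOptimWrapper` runs the train step under torch.cuda.amp
# autocast; loss_scale='dynamic' uses a dynamic GradScaler so that fp16
# gradients do not underflow.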
132
32.25
66
py
ERD
ERD-main/configs/yolo/yolov3_d53_8xb8-ms-608-273e_coco.py
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']

# model settings
data_preprocessor = dict(
    type='DetDataPreprocessor',
    mean=[0, 0, 0],
    std=[255., 255., 255.],
    bgr_to_rgb=True,
    pad_size_divisor=32)
model = dict(
    type='YOLOV3',
    data_preprocessor=data_preprocessor,
    backbone=dict(
        type='Darknet',
        depth=53,
        out_indices=(3, 4, 5),
        init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')),
    neck=dict(
        type='YOLOV3Neck',
        num_scales=3,
        in_channels=[1024, 512, 256],
        out_channels=[512, 256, 128]),
    bbox_head=dict(
        type='YOLOV3Head',
        num_classes=80,
        in_channels=[512, 256, 128],
        out_channels=[1024, 512, 256],
        anchor_generator=dict(
            type='YOLOAnchorGenerator',
            base_sizes=[[(116, 90), (156, 198), (373, 326)],
                        [(30, 61), (62, 45), (59, 119)],
                        [(10, 13), (16, 30), (33, 23)]],
            strides=[32, 16, 8]),
        bbox_coder=dict(type='YOLOBBoxCoder'),
        featmap_strides=[32, 16, 8],
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=True,
            loss_weight=1.0,
            reduction='sum'),
        loss_conf=dict(
            type='CrossEntropyLoss',
            use_sigmoid=True,
            loss_weight=1.0,
            reduction='sum'),
        loss_xy=dict(
            type='CrossEntropyLoss',
            use_sigmoid=True,
            loss_weight=2.0,
            reduction='sum'),
        loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='GridAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0)),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        conf_thr=0.005,
        nms=dict(type='nms', iou_threshold=0.45),
        max_per_img=100))

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'

# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (does not support LMDB or Memcached yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Expand',
        mean=data_preprocessor['mean'],
        to_rgb=data_preprocessor['bgr_to_rgb'],
        ratio_range=(1, 2)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
        min_crop_size=0.3),
    dict(type='RandomResize', scale=[(320, 320), (608, 608)], keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(608, 608), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=8,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric='bbox',
    backend_args=backend_args)
test_evaluator = val_evaluator

train_cfg = dict(max_epochs=273, val_interval=7)

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005),
    clip_grad=dict(max_norm=35, norm_type=2))

# learning policy
param_scheduler = [
    dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=2000),
    dict(type='MultiStepLR', by_epoch=True, milestones=[218, 246], gamma=0.1)
]

default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=7))

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
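# The `auto_scale_lr` factor only takes effect when training is launched with
# the `--auto-scale-lr` flag of MMDetection's tools/train.py; the LR is then
# scaled linearly by (actual total batch size) / base_batch_size.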
5442
31.39881
79
py
ERD
ERD-main/configs/yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py
_base_ = ['./yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py']

# yapf:disable
model = dict(
    bbox_head=dict(
        anchor_generator=dict(
            base_sizes=[[(220, 125), (128, 222), (264, 266)],
                        [(35, 87), (102, 96), (60, 170)],
                        [(10, 15), (24, 36), (72, 42)]])))
# yapf:enable

input_size = (320, 320)
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as those in `preprocess_cfg`
    dict(
        type='Expand',
        mean=[123.675, 116.28, 103.53],
        to_rgb=True,
        ratio_range=(1, 2)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', scale=input_size, keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=input_size, keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
1505
34.023256
76
py
ERD
ERD-main/configs/yolo/yolov3_d53_8xb8-ms-416-273e_coco.py
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as those in `preprocess_cfg`
    dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
        min_crop_size=0.3),
    dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=(416, 416), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
1153
38.793103
79
py
ERD
ERD-main/configs/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py
_base_ = [
    '../_base_/models/cascade-mask-rcnn_r50_fpn.py',
    '../_base_/datasets/lvis_v1_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained', checkpoint='torchvision://resnet101')),
    roi_head=dict(
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1203,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
                loss_cls=dict(
                    type='SeesawLoss',
                    p=0.8,
                    q=2.0,
                    num_classes=1203,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1203,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
                loss_cls=dict(
                    type='SeesawLoss',
                    p=0.8,
                    q=2.0,
                    num_classes=1203,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1203,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
                loss_cls=dict(
                    type='SeesawLoss',
                    p=0.8,
                    q=2.0,
                    num_classes=1203,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0))
        ],
        mask_head=dict(num_classes=1203)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))

# dataset settings
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomChoiceResize',
        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))

train_cfg = dict(val_interval=24)
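# The three bbox heads above differ only in their regression `target_stds`
# (0.1 -> 0.05 -> 0.033 per stage): each cascade stage refits progressively
# tighter boxes while sharing the same Seesaw classification setup.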
3534
35.822917
79
py
ERD
ERD-main/configs/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py
_base_ = './mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained', checkpoint='torchvision://resnet101')))
222
30.857143
66
py
ERD
ERD-main/configs/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py
_base_ = './mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py'
model = dict(
    roi_head=dict(
        mask_head=dict(
            predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
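# `tempearture` (sic) is the keyword spelling used by mmdet's NormedConv2d and
# NormedLinear predictors: a temperature that scales the L2-normalized
# features before the final prediction layer.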
199
32.333333
70
py
ERD
ERD-main/configs/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py
_base_ = [
    '../_base_/models/mask-rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(
            num_classes=1203,
            cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
            loss_cls=dict(
                type='SeesawLoss',
                p=0.8,
                q=2.0,
                num_classes=1203,
                loss_weight=1.0)),
        mask_head=dict(num_classes=1203)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))

# dataset settings
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomChoiceResize',
        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
train_dataloader = dict(
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/lvis_v1_train.json',
        data_prefix=dict(img=''),
        pipeline=train_pipeline))
val_dataloader = dict(
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/lvis_v1_val.json',
        data_prefix=dict(img='')))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='LVISMetric',
    ann_file=data_root + 'annotations/lvis_v1_val.json',
    metric=['bbox', 'segm'])
test_evaluator = val_evaluator

train_cfg = dict(val_interval=24)
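# In SeesawLoss (Wang et al., CVPR 2021), `p` exponentiates the mitigation
# factor that dampens negative gradients on rare classes and `q` the
# compensation factor for misclassified samples; 1203 is the number of
# LVIS v1 categories.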
1811
29.2
73
py
ERD
ERD-main/configs/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py
_base_ = [
    '../_base_/models/cascade-mask-rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained', checkpoint='torchvision://resnet101')),
    roi_head=dict(
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1203,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
                loss_cls=dict(
                    type='SeesawLoss',
                    p=0.8,
                    q=2.0,
                    num_classes=1203,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1203,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
                loss_cls=dict(
                    type='SeesawLoss',
                    p=0.8,
                    q=2.0,
                    num_classes=1203,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1203,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
                loss_cls=dict(
                    type='SeesawLoss',
                    p=0.8,
                    q=2.0,
                    num_classes=1203,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0))
        ],
        mask_head=dict(num_classes=1203)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))

# dataset settings
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomChoiceResize',
        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
train_dataloader = dict(
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/lvis_v1_train.json',
        data_prefix=dict(img=''),
        pipeline=train_pipeline))
val_dataloader = dict(
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/lvis_v1_val.json',
        data_prefix=dict(img='')))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='LVISMetric',
    ann_file=data_root + 'annotations/lvis_v1_val.json',
    metric=['bbox', 'segm'])
test_evaluator = val_evaluator

train_cfg = dict(val_interval=24)
4108
34.119658
79
py
ERD
ERD-main/configs/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py
_base_ = './mask-rcnn_r50_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py'  # noqa: E501
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained', checkpoint='torchvision://resnet101')))
248
34.571429
92
py
ERD
ERD-main/configs/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py
_base_ = './cascade-mask-rcnn_r101_fpn_seesaw-loss_random-ms-2x_lvis-v1.py'  # noqa: E501
model = dict(
    roi_head=dict(
        mask_head=dict(
            predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
218
35.5
89
py
ERD
ERD-main/configs/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py
_base_ = [
    '../_base_/models/mask-rcnn_r50_fpn.py',
    '../_base_/datasets/lvis_v1_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(
            num_classes=1203,
            cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
            loss_cls=dict(
                type='SeesawLoss',
                p=0.8,
                q=2.0,
                num_classes=1203,
                loss_weight=1.0)),
        mask_head=dict(num_classes=1203)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))

# dataset settings
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomChoiceResize',
        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))

train_cfg = dict(val_interval=24)
1237
30.74359
76
py