repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class: py)
---|---|---|---|---|---|---|
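The three numeric columns are simple functions of the `code` string. A minimal sketch of that derivation, assuming `file_length` counts characters and `avg_line_length` averages over physical lines:

def code_stats(code: str) -> dict:
    """Derive the numeric columns above from a raw `code` string."""
    lines = code.splitlines() or ['']
    return {
        'file_length': len(code),
        'avg_line_length': sum(len(line) for line in lines) / len(lines),
        'max_line_length': max(len(line) for line in lines),
    }

# e.g. code_stats(open('configs/gfl/gfl_r50_fpn_1x_coco.py').read())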
ERD
|
ERD-main/configs/gfl/gfl_x101-32x4d_fpn_ms-2x_coco.py
|
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
type='GFL',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 456 | 25.882353 | 76 |
py
|
ERD
|
ERD-main/configs/gfl/gfl_r50_fpn_ms-2x_coco.py
|
_base_ = './gfl_r50_fpn_1x_coco.py'
max_epochs = 24
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
| 774 | 25.724138 | 79 |
py
|
ERD
|
ERD-main/configs/gfl/gfl_r50_fpn_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='GFL',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='GFLHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
reg_max=16,
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
| 1,986 | 28.656716 | 79 |
py
|
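The `_base_` field and the `{{_base_.xxx}}` placeholders in these configs are MMEngine's inheritance mechanism: a child file is merged over its bases, and placeholders resolve against the merged result. A minimal sketch, assuming mmengine is installed and ERD-main is the working directory:

from mmengine.config import Config

# Config.fromfile resolves the `_base_` chain: the ms-2x config above
# inherits the model from gfl_r50_fpn_1x_coco.py and only overrides the
# schedule and the training pipeline.
cfg = Config.fromfile('configs/gfl/gfl_r50_fpn_ms-2x_coco.py')
print(cfg.train_cfg.max_epochs)   # 24, set by the 2x override
print(cfg.model.bbox_head.type)   # 'GFLHead', inherited from the 1x base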
ERD
|
ERD-main/configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py
|
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 524 | 31.8125 | 72 |
py
|
ERD
|
ERD-main/configs/tridentnet/tridentnet_r50-caffe_1x_coco.py
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='TridentFasterRCNN',
backbone=dict(
type='TridentResNet',
trident_dilations=(1, 2, 3),
num_branch=3,
test_branch_idx=1,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(type='TridentRoIHead', num_branch=3, test_branch_idx=1),
train_cfg=dict(
rpn_proposal=dict(max_per_img=500),
rcnn=dict(
sampler=dict(num=128, pos_fraction=0.5,
add_gt_as_proposals=False))))
| 748 | 31.565217 | 74 |
py
|
ERD
|
ERD-main/configs/tridentnet/tridentnet_r50-caffe_ms-1x_coco.py
|
_base_ = 'tridentnet_r50-caffe_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
| 505 | 30.625 | 73 |
py
|
ERD
|
ERD-main/configs/tridentnet/tridentnet_r50-caffe_ms-3x_coco.py
|
_base_ = 'tridentnet_r50-caffe_ms-1x_coco.py'
# learning rate
max_epochs = 36
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
| 431 | 21.736842 | 79 |
py
|
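The schedules in these configs pair a short iteration-based warmup with epoch-based step decay. A sketch of the multiplier applied by the `LinearLR` stage above, under the interpretation that MMEngine ramps the factor linearly from `start_factor` to 1 over iterations `begin`..`end`:

def warmup_factor(it: int, start_factor: float = 0.001, end: int = 500) -> float:
    # LR multiplier during warmup; after `end` iterations the factor is 1.
    if it >= end:
        return 1.0
    return start_factor + (1.0 - start_factor) * it / end

print(warmup_factor(0))    # 0.001
print(warmup_factor(250))  # ~0.5005
print(warmup_factor(500))  # 1.0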
ERD
|
ERD-main/configs/ssd/ssd512_coco.py
|
_base_ = 'ssd300_coco.py'
# model settings
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
| 2,132 | 33.967213 | 76 |
py
|
ERD
|
ERD-main/configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=1)
model = dict(
type='SingleStageDetector',
data_preprocessor=data_preprocessor,
backbone=dict(
type='MobileNetV2',
out_indices=(4, 7),
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)),
neck=dict(
type='SSDNeck',
in_channels=(96, 1280),
out_channels=(96, 1280, 512, 256, 256, 128),
level_strides=(2, 2, 2, 2),
level_paddings=(1, 1, 1, 1),
l2_norm_scale=None,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)),
bbox_head=dict(
type='SSDHead',
in_channels=(96, 1280, 512, 256, 256, 128),
num_classes=80,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='Normal', layer='Conv2d', std=0.001),
# set anchor size manually instead of using the predefined
# SSD300 setting.
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
strides=[16, 32, 64, 107, 160, 320],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
min_sizes=[48, 100, 150, 202, 253, 304],
max_sizes=[100, 150, 202, 253, 304, 320]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])),
# model training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
sampler=dict(type='PseudoSampler'),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False),
test_cfg=dict(
nms_pre=1000,
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200))
env_cfg = dict(cudnn_benchmark=True)
# dataset settings
input_size = 320
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=data_preprocessor['mean'],
to_rgb=data_preprocessor['bgr_to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=24,
num_workers=4,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type={{_base_.dataset_type}},
data_root={{_base_.data_root}},
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule
max_epochs = 120
train_cfg = dict(max_epochs=max_epochs, val_interval=5)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='CosineAnnealingLR',
begin=0,
T_max=max_epochs,
end=max_epochs,
by_epoch=True,
eta_min=0)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (24 samples per GPU)
auto_scale_lr = dict(base_batch_size=192)
| 5,043 | 30.72327 | 79 |
py
|
ERD
|
ERD-main/configs/ssd/ssd300_coco.py
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type={{_base_.dataset_type}},
data_root={{_base_.data_root}},
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args={{_base_.backend_args}})))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
| 2,414 | 32.541667 | 74 |
py
|
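The `auto_scale_lr` blocks follow the linear scaling rule: when enabled, the optimizer LR is multiplied by the ratio of the actual total batch size to `base_batch_size`. A minimal sketch of that arithmetic:

def scale_lr(base_lr: float, base_batch_size: int,
             num_gpus: int, samples_per_gpu: int) -> float:
    # Linear scaling: LR grows/shrinks with the actual total batch size.
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

# SSD300 above trains at lr=2e-3 with 8 GPUs x 8 samples (base_batch_size=64);
# the same config on 4 GPUs x 8 samples would scale to 1e-3.
print(scale_lr(2e-3, 64, 4, 8))  # 0.001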
ERD
|
ERD-main/configs/dcnv2/faster-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 216 | 35.166667 | 74 |
py
|
ERD
|
ERD-main/configs/dcnv2/mask-rcnn_r50-mdconv-c3-c5_fpn_1x_coco.py
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 212 | 34.5 | 74 |
py
|
ERD
|
ERD-main/configs/dcnv2/faster-rcnn_r50-mdconv-group4-c3-c5_fpn_1x_coco.py
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 216 | 35.166667 | 74 |
py
|
ERD
|
ERD-main/configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
_delete_=True,
type='ModulatedDeformRoIPoolPack',
output_size=7,
output_channels=256),
out_channels=256,
featmap_strides=[4, 8, 16, 32])))
| 417 | 31.153846 | 56 |
py
|
ERD
|
ERD-main/configs/dcnv2/mask-rcnn_r50-mdconv-c3-c5_fpn_amp-1x_coco.py
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
# MMEngine support the following two ways, users can choose
# according to convenience
# optim_wrapper = dict(type='AmpOptimWrapper')
_base_.optim_wrapper.type = 'AmpOptimWrapper'
| 393 | 34.818182 | 74 |
py
|
ERD
|
ERD-main/configs/nas_fpn/retinanet_r50_fpn_crop640-50e_coco.py
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=64,
batch_augments=[dict(type='BatchFixedSizePad', size=(640, 640))]),
backbone=dict(norm_eval=False),
neck=dict(
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(640, 640), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 50e
max_epochs = 50
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[30, 40],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
| 2,521 | 30.924051 | 79 |
py
|
ERD
|
ERD-main/configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py
|
_base_ = './retinanet_r50_fpn_crop640-50e_coco.py'
# model settings
model = dict(
    # `pad_size_divisor=128` ensures the feature map sizes
    # in `NAS_FPN` won't mismatch.
data_preprocessor=dict(pad_size_divisor=128),
neck=dict(
_delete_=True,
type='NASFPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
stack_times=7,
start_level=1,
norm_cfg=dict(type='BN', requires_grad=True)))
| 480 | 27.294118 | 59 |
py
|
ERD
|
ERD-main/configs/rtmdet/rtmdet_s_8xb32-300e_coco.py
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.33,
widen_factor=0.5,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
bbox_head=dict(in_channels=128, feat_channels=128, exp_on_reg=False))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='PackDetInputs')
]
train_pipeline_stage2 = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
| 2,096 | 32.285714 | 126 |
py
|
ERD
|
ERD-main/configs/rtmdet/rtmdet-ins_l_8xb32-300e_coco.py
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='RTMDetInsSepBNHead',
num_classes=80,
in_channels=256,
stacked_convs=2,
share_conv=True,
pred_kernel_size=1,
feat_channels=256,
act_cfg=dict(type='SiLU', inplace=True),
norm_cfg=dict(type='SyncBN', requires_grad=True),
anchor_generator=dict(
type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]),
bbox_coder=dict(type='DistancePointBBoxCoder'),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_mask=dict(
type='DiceLoss', loss_weight=2.0, eps=5e-6, reduction='mean')),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100,
mask_thr_binary=0.5),
)
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(640, 640),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='PackDetInputs')
]
train_dataloader = dict(pin_memory=True, dataset=dict(pipeline=train_pipeline))
train_pipeline_stage2 = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(640, 640),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
val_evaluator = dict(metric=['bbox', 'segm'])
test_evaluator = val_evaluator
| 3,140 | 28.914286 | 79 |
py
|
ERD
|
ERD-main/configs/rtmdet/rtmdet_m_8xb32-300e_coco.py
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
model = dict(
backbone=dict(deepen_factor=0.67, widen_factor=0.75),
neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2),
bbox_head=dict(in_channels=192, feat_channels=192))
| 250 | 34.857143 | 79 |
py
|
ERD
|
ERD-main/configs/rtmdet/rtmdet-ins_s_8xb32-300e_coco.py
|
_base_ = './rtmdet-ins_l_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.33,
widen_factor=0.5,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
bbox_head=dict(in_channels=128, feat_channels=128))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(640, 640),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='PackDetInputs')
]
train_pipeline_stage2 = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(640, 640),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
| 2,492 | 29.777778 | 126 |
py
|
ERD
|
ERD-main/configs/rtmdet/rtmdet_l_8xb32-300e_coco.py
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', './rtmdet_tta.py'
]
model = dict(
type='RTMDet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False,
batch_augments=None),
backbone=dict(
type='CSPNeXt',
arch='P5',
expand_ratio=0.5,
deepen_factor=1,
widen_factor=1,
channel_attention=True,
norm_cfg=dict(type='SyncBN'),
act_cfg=dict(type='SiLU', inplace=True)),
neck=dict(
type='CSPNeXtPAFPN',
in_channels=[256, 512, 1024],
out_channels=256,
num_csp_blocks=3,
expand_ratio=0.5,
norm_cfg=dict(type='SyncBN'),
act_cfg=dict(type='SiLU', inplace=True)),
bbox_head=dict(
type='RTMDetSepBNHead',
num_classes=80,
in_channels=256,
stacked_convs=2,
feat_channels=256,
anchor_generator=dict(
type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]),
bbox_coder=dict(type='DistancePointBBoxCoder'),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
with_objectness=False,
exp_on_reg=True,
share_conv=True,
pred_kernel_size=1,
norm_cfg=dict(type='SyncBN'),
act_cfg=dict(type='SiLU', inplace=True)),
train_cfg=dict(
assigner=dict(type='DynamicSoftLabelAssigner', topk=13),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=30000,
min_bbox_size=0,
score_thr=0.001,
nms=dict(type='nms', iou_threshold=0.65),
max_per_img=300),
)
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='PackDetInputs')
]
train_pipeline_stage2 = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(640, 640), keep_ratio=True),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=32,
num_workers=10,
batch_sampler=None,
pin_memory=True,
dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(
batch_size=5, num_workers=10, dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
max_epochs = 300
stage2_num_epochs = 20
base_lr = 0.004
interval = 10
train_cfg = dict(
max_epochs=max_epochs,
val_interval=interval,
dynamic_intervals=[(max_epochs - stage2_num_epochs, 1)])
val_evaluator = dict(proposal_nums=(100, 1, 10))
test_evaluator = val_evaluator
# optimizer
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
paramwise_cfg=dict(
norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0e-5,
by_epoch=False,
begin=0,
end=1000),
dict(
        # use cosine lr from epoch 150 to 300
type='CosineAnnealingLR',
eta_min=base_lr * 0.05,
begin=max_epochs // 2,
end=max_epochs,
T_max=max_epochs // 2,
by_epoch=True,
convert_to_iter_based=True),
]
# hooks
default_hooks = dict(
checkpoint=dict(
interval=interval,
max_keep_ckpts=3 # only keep latest 3 checkpoints
))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=max_epochs - stage2_num_epochs,
switch_pipeline=train_pipeline_stage2)
]
| 5,222 | 28.178771 | 73 |
py
|
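The second scheduler segment above anneals the LR with a cosine from epoch 150 to 300, from `base_lr` down to `eta_min = base_lr * 0.05`. A worked sketch, assuming the standard cosine-annealing formula:

import math

def cosine_lr(t: float, t_max: float, eta_max: float, eta_min: float) -> float:
    # Standard cosine annealing over t in [0, t_max].
    return eta_min + 0.5 * (eta_max - eta_min) * (1.0 + math.cos(math.pi * t / t_max))

base_lr = 0.004
print(cosine_lr(0, 150, base_lr, base_lr * 0.05))    # 0.004   (epoch 150)
print(cosine_lr(75, 150, base_lr, base_lr * 0.05))   # 0.0021  (epoch 225)
print(cosine_lr(150, 150, base_lr, base_lr * 0.05))  # 0.0002  (epoch 300)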
ERD
|
ERD-main/configs/rtmdet/rtmdet-ins_x_8xb16-300e_coco.py
|
_base_ = './rtmdet-ins_l_8xb32-300e_coco.py'
model = dict(
backbone=dict(deepen_factor=1.33, widen_factor=1.25),
neck=dict(
in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4),
bbox_head=dict(in_channels=320, feat_channels=320))
base_lr = 0.002
# optimizer
optim_wrapper = dict(optimizer=dict(lr=base_lr))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0e-5,
by_epoch=False,
begin=0,
end=1000),
dict(
        # use cosine lr from epoch 150 to 300
type='CosineAnnealingLR',
eta_min=base_lr * 0.05,
begin=_base_.max_epochs // 2,
end=_base_.max_epochs,
T_max=_base_.max_epochs // 2,
by_epoch=True,
convert_to_iter_based=True),
]
| 795 | 23.875 | 74 |
py
|
ERD
|
ERD-main/configs/rtmdet/rtmdet_tta.py
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.6), max_per_img=100))
img_scales = [(640, 640), (320, 320), (960, 960)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale=s, keep_ratio=True)
for s in img_scales
],
[
# ``RandomFlip`` must be placed before ``Pad``, otherwise
# bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='Pad',
size=(960, 960),
pad_val=dict(img=(114, 114, 114))),
],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction'))
]
])
]
| 1,170 | 31.527778 | 78 |
py
|
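`TestTimeAug` takes one transform from each inner list and enumerates all combinations, so the pipeline above yields 3 scales x 2 flip settings = 6 views per image, whose detections are merged by `DetTTAModel`'s NMS. A sketch of the enumeration:

from itertools import product

img_scales = [(640, 640), (320, 320), (960, 960)]
flip_probs = [1.0, 0.0]  # always-flip and never-flip branches
views = list(product(img_scales, flip_probs))
print(len(views))  # 6 augmented views per test image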
ERD
|
ERD-main/configs/rtmdet/rtmdet_tiny_8xb32-300e_coco.py
|
_base_ = './rtmdet_s_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.167,
widen_factor=0.375,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
bbox_head=dict(in_channels=96, feat_channels=96, exp_on_reg=False))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='CachedMosaic',
img_scale=(640, 640),
pad_val=114.0,
max_cached_images=20,
random_pop=False),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=10,
random_pop=False,
pad_val=(114, 114, 114),
prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
| 1,435 | 31.636364 | 129 |
py
|
ERD
|
ERD-main/configs/rtmdet/rtmdet-ins_tiny_8xb32-300e_coco.py
|
_base_ = './rtmdet-ins_s_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.167,
widen_factor=0.375,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
bbox_head=dict(in_channels=96, feat_channels=96))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='CachedMosaic',
img_scale=(640, 640),
pad_val=114.0,
max_cached_images=20,
random_pop=False),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=10,
random_pop=False,
pad_val=(114, 114, 114),
prob=0.5),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
| 1,546 | 30.571429 | 129 |
py
|
ERD
|
ERD-main/configs/rtmdet/rtmdet_x_8xb32-300e_coco.py
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
model = dict(
backbone=dict(deepen_factor=1.33, widen_factor=1.25),
neck=dict(
in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4),
bbox_head=dict(in_channels=320, feat_channels=320))
| 260 | 31.625 | 74 |
py
|
ERD
|
ERD-main/configs/rtmdet/rtmdet-ins_m_8xb32-300e_coco.py
|
_base_ = './rtmdet-ins_l_8xb32-300e_coco.py'
model = dict(
backbone=dict(deepen_factor=0.67, widen_factor=0.75),
neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2),
bbox_head=dict(in_channels=192, feat_channels=192))
| 254 | 35.428571 | 79 |
py
|
ERD
|
ERD-main/configs/rtmdet/classification/cspnext-m_8xb256-rsb-a1-600e_in1k.py
|
_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py'
model = dict(
backbone=dict(deepen_factor=0.67, widen_factor=0.75),
head=dict(in_channels=768))
| 155 | 25 | 57 |
py
|
ERD
|
ERD-main/configs/rtmdet/classification/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py
|
_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py'
model = dict(
backbone=dict(deepen_factor=0.167, widen_factor=0.375),
head=dict(in_channels=384))
| 157 | 25.333333 | 59 |
py
|
ERD
|
ERD-main/configs/rtmdet/classification/cspnext-s_8xb256-rsb-a1-600e_in1k.py
|
_base_ = [
'mmcls::_base_/datasets/imagenet_bs256_rsb_a12.py',
'mmcls::_base_/schedules/imagenet_bs2048_rsb.py',
'mmcls::_base_/default_runtime.py'
]
model = dict(
type='ImageClassifier',
backbone=dict(
type='mmdet.CSPNeXt',
arch='P5',
out_indices=(4, ),
expand_ratio=0.5,
deepen_factor=0.33,
widen_factor=0.5,
channel_attention=True,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='mmdet.SiLU')),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
mode='original',
loss_weight=1.0),
topk=(1, 5)),
train_cfg=dict(augments=[
dict(type='Mixup', alpha=0.2),
dict(type='CutMix', alpha=1.0)
]))
# dataset settings
train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True))
# schedule settings
optim_wrapper = dict(
optimizer=dict(weight_decay=0.01),
paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.),
)
param_scheduler = [
# warm up learning rate scheduler
dict(
type='LinearLR',
start_factor=0.0001,
by_epoch=True,
begin=0,
end=5,
# update by iter
convert_to_iter_based=True),
# main learning rate scheduler
dict(
type='CosineAnnealingLR',
T_max=595,
eta_min=1.0e-6,
by_epoch=True,
begin=5,
end=600)
]
train_cfg = dict(by_epoch=True, max_epochs=600)
| 1,629 | 24.076923 | 76 |
py
|
ERD
|
ERD-main/configs/rtmdet/classification/cspnext-l_8xb256-rsb-a1-600e_in1k.py
|
_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py'
model = dict(
backbone=dict(deepen_factor=1, widen_factor=1),
head=dict(in_channels=1024))
| 150 | 24.166667 | 51 |
py
|
ERD
|
ERD-main/configs/rtmdet/classification/cspnext-x_8xb256-rsb-a1-600e_in1k.py
|
_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py'
model = dict(
backbone=dict(deepen_factor=1.33, widen_factor=1.25),
head=dict(in_channels=1280))
| 156 | 25.166667 | 57 |
py
|
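Across the CSPNeXt classification variants above, only `deepen_factor` and `widen_factor` change; the head's `in_channels` is the top-stage base width of 1024 scaled by `widen_factor`. A quick check of that arithmetic:

base_width = 1024  # top-stage channels of CSPNeXt at widen_factor=1
for name, widen in [('tiny', 0.375), ('s', 0.5), ('m', 0.75),
                    ('l', 1.0), ('x', 1.25)]:
    print(name, int(base_width * widen))  # 384, 512, 768, 1024, 1280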
ERD
|
ERD-main/configs/paa/paa_r50_fpn_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='PAA',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='PAAHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
| 2,384 | 28.444444 | 79 |
py
|
ERD
|
ERD-main/configs/paa/paa_r101_fpn_ms-3x_coco.py
|
_base_ = './paa_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 194 | 26.857143 | 61 |
py
|
ERD
|
ERD-main/configs/paa/paa_r50_fpn_2x_coco.py
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(max_epochs=max_epochs)
| 399 | 20.052632 | 79 |
py
|
ERD
|
ERD-main/configs/paa/paa_r101_fpn_1x_coco.py
|
_base_ = './paa_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 191 | 26.428571 | 61 |
py
|
ERD
|
ERD-main/configs/paa/paa_r50_fpn_1.5x_coco.py
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 18
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[12, 16],
gamma=0.1)
]
# training schedule for 1.5x
train_cfg = dict(max_epochs=max_epochs)
| 401 | 20.157895 | 79 |
py
|
ERD
|
ERD-main/configs/paa/paa_r50_fpn_ms-3x_coco.py
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
# training schedule for 3x
train_cfg = dict(max_epochs=max_epochs)
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
| 777 | 24.933333 | 79 |
py
|
ERD
|
ERD-main/configs/paa/paa_r101_fpn_2x_coco.py
|
_base_ = './paa_r101_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(max_epochs=max_epochs)
| 400 | 20.105263 | 79 |
py
|
ERD
|
ERD-main/configs/yolact/yolact_r50_1xb8-55e_coco.py
|
_base_ = [
'../_base_/datasets/coco_instance.py', '../_base_/default_runtime.py'
]
img_norm_cfg = dict(
mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True)
# model settings
input_size = 550
model = dict(
type='YOLACT',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=img_norm_cfg['mean'],
std=img_norm_cfg['std'],
bgr_to_rgb=img_norm_cfg['to_rgb'],
pad_mask=True),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1, # do not freeze stem
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False, # update the statistics of bn
zero_init_residual=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5,
upsample_cfg=dict(mode='bilinear')),
bbox_head=dict(
type='YOLACTHead',
num_classes=80,
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=3,
scales_per_octave=1,
base_sizes=[8, 16, 32, 64, 128],
ratios=[0.5, 1.0, 2.0],
strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
centers=[(550 * 0.5 / x, 550 * 0.5 / x)
for x in [69, 35, 18, 9, 5]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
reduction='none',
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
num_head_convs=1,
num_protos=32,
use_ohem=True),
mask_head=dict(
type='YOLACTProtonet',
in_channels=256,
num_protos=32,
num_classes=80,
max_masks_to_train=100,
loss_mask_weight=6.125,
with_seg_branch=True,
loss_segm=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
sampler=dict(type='PseudoSampler'), # YOLACT should use PseudoSampler
# smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
mask_thr=0.5,
iou_thr=0.5,
top_k=200,
max_per_img=100,
mask_thr_binary=0.5))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=4,
batch_sampler=None,
dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
max_epochs = 55
# training schedule for 55e
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 42, 49, 52],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (1 GPU) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
| 5,373 | 30.426901 | 79 |
py
|
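The YOLACT anchor strides and centers above are derived from the 550x550 input and the P3-P7 feature-map sizes; a sketch reproducing that arithmetic:

input_size = 550
feat_sizes = [69, 35, 18, 9, 5]  # P3-P7 spatial sizes for a 550x550 input
strides = [input_size / x for x in feat_sizes]
centers = [(input_size * 0.5 / x, input_size * 0.5 / x) for x in feat_sizes]
print(strides[0])  # ~7.97: fractional strides, hence the float list in the config
print(centers[0])  # (~3.99, ~3.99)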
ERD
|
ERD-main/configs/yolact/yolact_r50_8xb8-55e_coco.py
|
_base_ = 'yolact_r50_1xb8-55e_coco.py'
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(lr=8e-3),
clip_grad=dict(max_norm=35, norm_type=2))
# learning rate
max_epochs = 55
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 42, 49, 52],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
| 652 | 26.208333 | 79 |
py
|
ERD
|
ERD-main/configs/yolact/yolact_r101_1xb8-55e_coco.py
|
_base_ = './yolact_r50_1xb8-55e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 197 | 23.75 | 61 |
py
|
ERD
|
ERD-main/configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True)
# model settings
model = dict(
type='CornerNet',
data_preprocessor=data_preprocessor,
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
# The cropped images are padded into squares during training,
# but may be smaller than crop_size.
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
# Make sure the output is always crop_size.
dict(type='Resize', scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
backend_args=_base_.backend_args,
),
# don't need Resize
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb']),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'border'))
]
train_dataloader = dict(
batch_size=6,
num_workers=3,
batch_sampler=None,
dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='Adam', lr=0.0005),
clip_grad=dict(max_norm=35, norm_type=2))
max_epochs = 210
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[180],
gamma=0.1)
]
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=48)
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'),
max_per_img=100))
tta_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
backend_args=_base_.backend_args),
dict(
type='TestTimeAug',
transforms=[
[
# ``RandomFlip`` must be placed before ``RandomCenterCropPad``,
# otherwise bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
mean=data_preprocessor['mean'],
std=data_preprocessor['std'],
# Image data is not converted to rgb.
to_rgb=data_preprocessor['bgr_to_rgb'])
],
[dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'flip', 'flip_direction', 'border'))
]
])
]
| 5,555 | 29.195652 | 79 |
py
|
ERD
|
ERD-main/configs/cornernet/cornernet_hourglass104_10xb5-crop511-210e-mstest_coco.py
|
_base_ = './cornernet_hourglass104_8xb6-210e-mstest_coco.py'
train_dataloader = dict(batch_size=5)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (10 GPUs) x (5 samples per GPU)
auto_scale_lr = dict(base_batch_size=50)
| 288 | 31.111111 | 60 |
py
|
ERD
|
ERD-main/configs/cornernet/cornernet_hourglass104_32xb3-210e-mstest_coco.py
|
_base_ = './cornernet_hourglass104_8xb6-210e-mstest_coco.py'
train_dataloader = dict(batch_size=3)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (3 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
| 288 | 31.111111 | 60 |
py
|
ERD
|
ERD-main/configs/resnet_strikes_back/cascade-mask-rcnn_r50-rsb-pre_fpn_1x_coco.py
|
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
| 620 | 37.8125 | 135 |
py
|
ERD
|
ERD-main/configs/resnet_strikes_back/retinanet_r50-rsb-pre_fpn_1x_coco.py
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
| 613 | 37.375 | 135 |
py
|
ERD
|
ERD-main/configs/resnet_strikes_back/faster-rcnn_r50-rsb-pre_fpn_1x_coco.py
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
| 615 | 37.5 | 135 |
py
|
ERD
|
ERD-main/configs/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
| 612 | 37.3125 | 135 |
py
|
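All four RSB configs above swap the base SGD optimizer for AdamW with `_delete_=True`. Without that flag, MMEngine merges the override into the base dict, so SGD-only keys such as `momentum` would leak into the AdamW constructor. A sketch of the difference using a plain-dict analogy, not the actual Config implementation:

base = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
override = dict(type='AdamW', lr=0.0002, weight_decay=0.05)

merged = {**base, **override}  # default merge: `momentum` survives
replaced = dict(override)      # `_delete_=True`: base dict is discarded
print('momentum' in merged, 'momentum' in replaced)  # True False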
ERD
|
ERD-main/configs/crowddet/crowddet-rcnn_refine_r50_fpn_8xb2-30e_crowdhuman.py
|
_base_ = './crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py'
model = dict(roi_head=dict(bbox_head=dict(with_refine=True)))
| 121 | 29.5 | 61 |
py
|
ERD
|
ERD-main/configs/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py
|
_base_ = ['../_base_/default_runtime.py']
model = dict(
type='CrowdDet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False,
pad_size_divisor=64,
        # This option is set according to https://github.com/Purkialo/CrowdDet/
        # blob/master/lib/data/CrowdHuman.py. The images in the entire batch
        # are resized together.
batch_augments=[
dict(type='BatchResize', scale=(1400, 800), pad_size_divisor=64)
]),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
upsample_cfg=dict(mode='bilinear', align_corners=False)),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[1.0, 2.0, 3.0],
strides=[4, 8, 16, 32, 64],
centers=[(8, 8), (8, 8), (8, 8), (8, 8), (8, 8)]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0],
clip_border=False),
loss_cls=dict(type='CrossEntropyLoss', loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='MultiInstanceRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=-1,
aligned=True,
use_torchvision=True),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='MultiInstanceBBoxHead',
with_refine=False,
num_shared_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss',
loss_weight=1.0,
use_sigmoid=False,
reduction='none'),
loss_bbox=dict(
type='SmoothL1Loss', loss_weight=1.0, reduction='none'))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=(0.3, 0.7),
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2400,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=2),
rcnn=dict(
assigner=dict(
type='MultiInstanceAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.3,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='MultiInsRandomSampler',
num=512,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1200,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=2),
rcnn=dict(
nms=dict(type='nms', iou_threshold=0.5),
score_thr=0.01,
max_per_img=500)))
dataset_type = 'CrowdHumanDataset'
data_root = 'data/CrowdHuman/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (LMDB and Memcached not supported yet)
# data_root = 's3://openmmlab/datasets/tracking/CrowdHuman/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/tracking/',
# 'data/': 's3://openmmlab/datasets/tracking/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
'flip_direction'))
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1400, 800), keep_ratio=True),
    # LoadAnnotations comes after Resize so the gt bboxes are not resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=None,  # using a 'batch_sampler' here may decrease precision
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotation_train.odgt',
data_prefix=dict(img='Images/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotation_val.odgt',
data_prefix=dict(img='Images/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CrowdHumanMetric',
ann_file=data_root + 'annotation_val.odgt',
metric=['AP', 'MR', 'JI'],
backend_args=backend_args)
test_evaluator = val_evaluator
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=30, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=800),
dict(
type='MultiStepLR',
begin=0,
end=30,
by_epoch=True,
milestones=[24, 27],
gamma=0.1)
]
# optimizer
auto_scale_lr = dict(base_batch_size=16)
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001))
| 7,480 | 31.811404 | 79 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco.py
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have gt annotations, delete this step from the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args={{_base_.backend_args}})
test_evaluator = val_evaluator
| 2,968 | 28.39604 | 78 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_r101_8xb2-lsj-50e_coco.py
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco.py']
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 208 | 25.125 | 61 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py
|
_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
depths=depths, init_cfg=dict(type='Pretrained',
checkpoint=pretrained)))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
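# With depths = [2, 2, 18, 2] the two updates above expand to keys such as
# 'backbone.stages.0.blocks.0.norm' and 'backbone.stages.2.downsample.norm',
# all mapped to lr_mult=0.1, decay_mult=0.0.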
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
| 1,471 | 37.736842 | 124 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_8xb2-lsj-50e_coco-panoptic.py
|
_base_ = ['./mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
| 295 | 48.333333 | 128 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_r101_8xb2-lsj-50e_coco-panoptic.py
|
_base_ = './mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 215 | 26 | 61 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic.py
|
_base_ = ['./mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(
embed_dims=192,
num_heads=[6, 12, 24, 48],
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(num_queries=200, in_channels=[192, 384, 768, 1536]))
train_dataloader = dict(batch_size=1, num_workers=1)
# learning policy
max_iters = 737500
param_scheduler = dict(end=max_iters, milestones=[655556, 710184])
# Before the 735001st iteration, we do evaluation every 5000 iterations.
# After the 735000th iteration, we do evaluation every 737500 iterations,
# which means that we only do evaluation at the end of training.
interval = 5000
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
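# A quick check with the values above: 737500 // 5000 * 5000 + 1 = 735001,
# so dynamic_intervals == [(735001, 737500)].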
train_cfg = dict(
max_iters=max_iters,
val_interval=interval,
dynamic_intervals=dynamic_intervals)
| 999 | 37.461538 | 129 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
| 1,978 | 32.542373 | 123 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py
|
_base_ = [
'../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=True,
seg_pad_value=255)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=True,
seg_pad_value=255,
batch_augments=batch_augments)
num_things_classes = 80
num_stuff_classes = 53
num_classes = num_things_classes + num_stuff_classes
model = dict(
type='Mask2Former',
data_preprocessor=data_preprocessor,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
panoptic_head=dict(
type='Mask2FormerHead',
in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside
strides=[4, 8, 16, 32],
feat_channels=256,
out_channels=256,
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
num_queries=100,
num_transformer_feat_level=3,
pixel_decoder=dict(
type='MSDeformAttnPixelDecoder',
num_outs=3,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU'),
encoder=dict( # DeformableDetrTransformerEncoder
num_layers=6,
layer_cfg=dict( # DeformableDetrTransformerEncoderLayer
self_attn_cfg=dict( # MultiScaleDeformableAttention
embed_dims=256,
num_heads=8,
num_levels=3,
num_points=4,
dropout=0.0,
batch_first=True),
ffn_cfg=dict(
embed_dims=256,
feedforward_channels=1024,
num_fcs=2,
ffn_drop=0.0,
act_cfg=dict(type='ReLU', inplace=True)))),
positional_encoding=dict(num_feats=128, normalize=True)),
enforce_decoder_input_project=False,
positional_encoding=dict(num_feats=128, normalize=True),
transformer_decoder=dict( # Mask2FormerTransformerDecoder
return_intermediate=True,
num_layers=9,
layer_cfg=dict( # Mask2FormerTransformerDecoderLayer
self_attn_cfg=dict( # MultiheadAttention
embed_dims=256,
num_heads=8,
dropout=0.0,
batch_first=True),
cross_attn_cfg=dict( # MultiheadAttention
embed_dims=256,
num_heads=8,
dropout=0.0,
batch_first=True),
ffn_cfg=dict(
embed_dims=256,
feedforward_channels=2048,
num_fcs=2,
ffn_drop=0.0,
act_cfg=dict(type='ReLU', inplace=True))),
init_cfg=None),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=2.0,
reduction='mean',
class_weight=[1.0] * num_classes + [0.1]),
loss_mask=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=5.0),
loss_dice=dict(
type='DiceLoss',
use_sigmoid=True,
activate=True,
reduction='mean',
naive_dice=True,
eps=1.0,
loss_weight=5.0)),
panoptic_fusion_head=dict(
type='MaskFormerFusionHead',
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_panoptic=None,
init_cfg=None),
train_cfg=dict(
num_points=12544,
oversample_ratio=3.0,
importance_sample_ratio=0.75,
assigner=dict(
type='HungarianAssigner',
match_costs=[
dict(type='ClassificationCost', weight=2.0),
dict(
type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),
dict(type='DiceCost', weight=5.0, pred_act=True, eps=1.0)
]),
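        # The matching costs mirror the loss weights above
        # (cls 2.0, mask CE 5.0, dice 5.0).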
sampler=dict(type='MaskPseudoSampler')),
test_cfg=dict(
panoptic_on=True,
        # For now, the dataset does not support
        # evaluating the semantic segmentation metric.
semantic_on=False,
instance_on=True,
# max_per_image is for instance segmentation.
max_per_image=100,
iou_thr=0.8,
        # In Mask2Former's panoptic postprocessing,
        # masks whose score is less than 0.5 are filtered out.
filter_low_score=True),
init_cfg=None)
# dataset settings
data_root = 'data/coco/'
train_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
backend_args={{_base_.backend_args}}),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True,
backend_args={{_base_.backend_args}}),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_evaluator = [
dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
backend_args={{_base_.backend_args}}),
dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
backend_args={{_base_.backend_args}})
]
test_evaluator = val_evaluator
# optimizer
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(
type='AdamW',
lr=0.0001,
weight_decay=0.05,
eps=1e-8,
betas=(0.9, 0.999)),
paramwise_cfg=dict(
custom_keys={
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi,
},
norm_decay_mult=0.0),
clip_grad=dict(max_norm=0.01, norm_type=2))
# learning policy
max_iters = 368750
param_scheduler = dict(
type='MultiStepLR',
begin=0,
end=max_iters,
by_epoch=False,
milestones=[327778, 355092],
gamma=0.1)
# Before the 365001st iteration, we do evaluation every 5000 iterations.
# After the 365000th iteration, we do evaluation every 368750 iterations,
# which means that we only do evaluation at the end of training.
interval = 5000
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
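# A quick check with the values above: 368750 // 5000 * 5000 + 1 = 365001,
# so dynamic_intervals == [(365001, 368750)].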
train_cfg = dict(
type='IterBasedTrainLoop',
max_iters=max_iters,
val_interval=interval,
dynamic_intervals=dynamic_intervals)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
default_hooks = dict(
checkpoint=dict(
type='CheckpointHook',
by_epoch=False,
save_last=True,
max_keep_ckpts=3,
interval=interval))
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=False)
# Default setting for scaling LR automatically
# - `enable`: whether to scale the LR automatically (disabled by default).
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
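# If enabled, a sketch of the assumed linear scaling rule:
# scaled_lr = 0.0001 * (num_gpus * samples_per_gpu) / 16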
| 8,206 | 31.56746 | 79 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_swin-s-p4-w7-224_8xb2-lsj-50e_coco.py
|
_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
depths=depths, init_cfg=dict(type='Pretrained',
checkpoint=pretrained)))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
| 1,462 | 37.5 | 124 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py
|
_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
pretrain_img_size=384,
embed_dims=128,
depths=depths,
num_heads=[4, 8, 16, 32],
window_size=12,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(in_channels=[128, 256, 512, 1024]))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
| 1,614 | 36.55814 | 124 |
py
|
ERD
|
ERD-main/configs/mask2former/mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco.py
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
| 1,967 | 33.526316 | 123 |
py
|
ERD
|
ERD-main/configs/point_rend/point-rend_r50-caffe_fpn_ms-3x_coco.py
|
_base_ = './point-rend_r50-caffe_fpn_ms-1x_coco.py'
max_epochs = 36
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
| 391 | 19.631579 | 79 |
py
|
ERD
|
ERD-main/configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py
|
_base_ = '../mask_rcnn/mask-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# model settings
model = dict(
type='PointRend',
roi_head=dict(
type='PointRendRoIHead',
mask_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='concat',
roi_layer=dict(
_delete_=True, type='SimpleRoIAlign', output_size=14),
out_channels=256,
featmap_strides=[4]),
mask_head=dict(
_delete_=True,
type='CoarseMaskHead',
num_fcs=2,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
point_head=dict(
type='MaskPointHead',
num_fcs=3,
in_channels=256,
fc_channels=256,
num_classes=80,
coarse_pred_each_layer=True,
loss_point=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
mask_size=7,
num_points=14 * 14,
oversample_ratio=3,
importance_sample_ratio=0.75)),
test_cfg=dict(
rcnn=dict(
subdivision_steps=5,
subdivision_num_points=28 * 28,
scale_factor=2)))
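# At test time the coarse mask is subdivided 5 times at scale_factor=2,
# re-predicting 28 * 28 = 784 of the most uncertain points at each step
# (following the PointRend refinement scheme; starting from a 7x7 coarse
# mask this would yield a 7 * 2**5 = 224 px mask).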
| 1,448 | 31.2 | 75 |
py
|
ERD
|
ERD-main/configs/detectors/htc_r50-sac_1x_coco.py
|
_base_ = '../htc/htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True)))
| 245 | 26.333333 | 50 |
py
|
ERD
|
ERD-main/configs/detectors/detectors_cascade-rcnn_r50_1x_coco.py
|
_base_ = [
'../_base_/models/cascade-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
pretrained='torchvision://resnet50',
style='pytorch')))
| 1,053 | 30.939394 | 72 |
py
|
ERD
|
ERD-main/configs/detectors/cascade-rcnn_r50-sac_1x_coco.py
|
_base_ = [
'../_base_/models/cascade-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True)))
| 382 | 28.461538 | 72 |
py
|
ERD
|
ERD-main/configs/detectors/detectors_htc-r50_1x_coco.py
|
_base_ = '../htc/htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
pretrained='torchvision://resnet50',
style='pytorch')))
| 916 | 30.62069 | 57 |
py
|
ERD
|
ERD-main/configs/detectors/htc_r50-rfp_1x_coco.py
|
_base_ = '../htc/htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
output_img=True),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
pretrained='torchvision://resnet50',
style='pytorch')))
| 714 | 27.6 | 57 |
py
|
ERD
|
ERD-main/configs/detectors/cascade-rcnn_r50-rfp_1x_coco.py
|
_base_ = [
'../_base_/models/cascade-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
output_img=True),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
pretrained='torchvision://resnet50',
style='pytorch')))
| 851 | 28.37931 | 72 |
py
|
ERD
|
ERD-main/configs/detectors/detectors_htc-r101_20e_coco.py
|
_base_ = '../htc/htc_r101_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
pretrained='torchvision://resnet101',
style='pytorch')))
| 920 | 30.758621 | 57 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py
|
_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model setting
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
    # testing settings
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(clip_grad=None)
| 1,087 | 23.727273 | 66 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco.py
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
| 1,429 | 25.981132 | 78 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r50-caffe_fpn_gn-head_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FCOS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
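# i.e. the lr below (0.01) is held at 0.01 / 3 for the first 500 iterations,
# then runs at 0.01 and is multiplied by 0.1 after epochs 8 and 11.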
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=dict(max_norm=35, norm_type=2))
| 2,093 | 26.552632 | 78 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r50-caffe_fpn_gn-head_ms-640-800-2x_coco.py
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
| 814 | 25.290323 | 78 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r101-caffe_fpn_gn-head-1x_coco.py
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
| 242 | 23.3 | 66 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r101-caffe_fpn_gn-head_ms-640-800-2x_coco.py
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
| 1,005 | 24.794872 | 78 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r50-caffe_fpn_gn-head_4xb4-1x_coco.py
|
# TODO: Remove this config after benchmarking all related configs
_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=4)
| 188 | 30.5 | 65 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r50-caffe_fpn_gn-head-center_1x_coco.py
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5))
| 146 | 28.4 | 76 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r18_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py
|
_base_ = './fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py' # noqa
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
| 281 | 34.25 | 100 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r101_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py
|
_base_ = './fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py' # noqa
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 257 | 31.25 | 100 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r50_fpn_gn-head-center-normbbox-centeronreg-giou_8xb8-amp-lsj-200e_coco.py
|
_base_ = '../common/lsj-200e_coco-detection.py'
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
# model settings
model = dict(
type='FCOS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=dict(max_norm=35, norm_type=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
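# A sketch of the assumed linear scaling rule:
# scaled_lr = (0.01 * 4) * (num_gpus * samples_per_gpu) / 64,
# so the default 8 GPUs x 8 samples keeps the lr at 0.04.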
| 2,377 | 30.289474 | 79 |
py
|
ERD
|
ERD-main/configs/fcos/fcos_r50-dcn-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py
|
_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=True,
center_sampling=True,
conv_bias=True,
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
    # testing settings
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(clip_grad=None)
| 1,212 | 25.369565 | 74 |
py
|
ERD
|
ERD-main/configs/ddod/ddod_r50_fpn_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='DDOD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='DDODHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_iou=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
        # `assigner` here is the cls assigner
assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
| 2,223 | 29.465753 | 79 |
py
|
ERD
|
ERD-main/configs/condinst/condinst_r50_fpn_ms-poly-90k_coco_instance.py
|
_base_ = '../common/ms-poly-90k_coco-instance.py'
# model settings
model = dict(
type='CondInst',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='CondInstBboxHead',
num_params=169,
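        # 169 = (10*8 + 8) + (8*8 + 8) + (8*1 + 1): the weights and biases of
        # the three dynamic conv layers, assuming the CondInst design of
        # 8 mask-feature channels plus 2 relative-coordinate channels as input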
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
mask_head=dict(
type='CondInstMaskHead',
num_layers=3,
feat_channels=8,
size_of_interest=8,
mask_out_stride=4,
max_masks_to_train=300,
mask_feature_head=dict(
in_channels=256,
feat_channels=128,
start_level=0,
end_level=2,
out_channels=8,
mask_stride=8,
num_stacked_convs=4,
norm_cfg=dict(type='BN', requires_grad=True)),
loss_mask=dict(
type='DiceLoss',
use_sigmoid=True,
activate=True,
eps=5e-6,
loss_weight=1.0)),
# model training and testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100,
mask_thr=0.5))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
| 2,492 | 27.988372 | 78 |
py
|
ERD
|
ERD-main/configs/carafe/mask-rcnn_r50_fpn-carafe_1x_coco.py
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_size_divisor=64),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)),
roi_head=dict(
mask_head=dict(
upsample_cfg=dict(
type='carafe',
scale_factor=2,
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64))))
| 887 | 27.645161 | 52 |
py
|
ERD
|
ERD-main/configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_size_divisor=64),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)))
| 584 | 26.857143 | 56 |
py
|
ERD
|
ERD-main/configs/common/ms-poly-90k_coco-instance.py
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (LMDB and Memcached not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# Align with Detectron2
backend = 'pillow'
train_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=backend_args,
imdecode_backend=backend),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True,
backend=backend),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=backend_args,
imdecode_backend=backend),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
pin_memory=True,
sampler=dict(type='InfiniteSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
pin_memory=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 90k
max_iter = 90000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iter, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable`: whether to scale the LR automatically (disabled by default).
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
| 3,896 | 28.748092 | 78 |
py
|
ERD
|
ERD-main/configs/common/lsj-100e_coco-detection.py
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (LMDB and Memcached not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
        times=4,  # simply change this between 2 and 16 for 50e - 400e training.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
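# Effective schedule = times * max_epochs: with times=4 and max_epochs=25
# below, this trains for 4 * 25 = 100 epochs, matching the file name.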
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
max_epochs = 25
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
| 3,769 | 29.650407 | 79 |
py
|
ERD
|
ERD-main/configs/common/ms_3x_coco.py
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (LMDB and Memcached not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] gives higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable`: whether to scale the LR automatically (disabled by default).
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
| 3,449 | 30.651376 | 79 |
py
|
ERD
|
ERD-main/configs/common/lsj-200e_coco-detection.py
|
_base_ = './lsj-100e_coco-detection.py'
# 8x25=200e
train_dataloader = dict(dataset=dict(times=8))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=25,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
| 380 | 19.052632 | 69 |
py
|
ERD
|
ERD-main/configs/common/lsj-200e_coco-instance.py
|
_base_ = './lsj-100e_coco-instance.py'
# 8x25=200e
train_dataloader = dict(dataset=dict(times=8))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=25,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
| 379 | 19 | 69 |
py
|
ERD
|
ERD-main/configs/common/lsj-100e_coco-instance.py
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (LMDB and Memcached not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
        times=4,  # simply change this between 2 and 16 for 50e - 400e training.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
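# The times/epochs arithmetic referenced above, with max_epochs=25:
# times=2 -> 50e, times=4 -> 100e (this config), times=8 -> 200e and
# times=16 -> 400e effective dataset passes.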
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
max_epochs = 25
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
| 3,811 | 29.99187 | 79 |
py
|
ERD
|
ERD-main/configs/common/ssj_270k_coco-instance.py
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (no LMDB or Memcached support yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
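# A worked sketch of the jitter range above: with image_size=(1024, 1024)
# and ratio_range=(0.8, 1.25), RandomResize samples a target scale between
# 1024 * 0.8 = 819 and 1024 * 1.25 = 1280 pixels before the 1024x1024 crop.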
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# The model is trained for 270k iterations with batch size 64,
# which is roughly equivalent to 144 epochs.
max_iters = 270000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iters, val_interval=10000)
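# Checking the "roughly 144 epochs" note above, assuming the usual
# 118,287 images in COCO train2017: 270,000 iters x 64 images per iter
# / 118,287 ~= 146 epochs (or 144 if the dataset is rounded to 120k).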
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
        end=max_iters,
by_epoch=False,
milestones=[243000, 256500, 263250],
gamma=0.1)
]
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
| 3,943 | 30.301587 | 78 |
py
|
ERD
|
ERD-main/configs/common/ssj_scp_270k_coco-instance.py
|
_base_ = 'ssj_270k_coco-instance.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (no LMDB or Memcached support yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
load_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.25),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=image_size),
]
train_pipeline = [
dict(type='CopyPaste', max_num_pasted=100),
dict(type='PackDetInputs')
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=load_pipeline,
backend_args=backend_args),
pipeline=train_pipeline))
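# A brief note on the two-stage pipeline above: MultiImageMixDataset runs
# load_pipeline on every sampled image inside the wrapped dataset, then
# CopyPaste pastes up to max_num_pasted instances from a second sampled
# image into the first before packing; the Pad to image_size keeps both
# canvases aligned for pasting.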
| 1,949 | 30.967213 | 78 |
py
|
ERD
|
ERD-main/configs/common/ms_3x_coco-instance.py
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (no LMDB or Memcached support yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that milestones=[9, 11] gives higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable`: whether to scale the LR automatically by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
| 3,481 | 30.944954 | 79 |
py
|
ERD
|
ERD-main/configs/common/ms-poly_3x_coco-instance.py
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (no LMDB or Memcached support yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# In the mstrain 3x config, the image scale is sampled from the range
# [(1333, 640), (1333, 800)] (the 2.x img_scale / multiscale_mode='range'
# setting, expressed here via RandomResize)
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
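# Note: poly2mask=False above keeps the masks as polygons instead of
# decoding them to bitmaps at load time, which saves memory and speeds
# up the data pipeline.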
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that milestones=[9, 11] gives higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable`: whether to scale the LR automatically by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
| 3,680 | 29.932773 | 79 |
py
|
ERD
|
ERD-main/configs/common/ms-90k_coco.py
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (no LMDB or Memcached support yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# Align with Detectron2: decode and resize images with Pillow, since
# cv2 and Pillow interpolation differ slightly and can affect the mAP
backend = 'pillow'
train_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=backend_args,
imdecode_backend=backend),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True,
backend=backend),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=backend_args,
imdecode_backend=backend),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
pin_memory=True,
sampler=dict(type='InfiniteSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
pin_memory=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 90k
max_iter = 90000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iter, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
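# Sanity check on the schedule, assuming ~118,287 COCO train2017 images:
# 90,000 iters x 16 images per iter / 118,287 ~= 12 epochs, i.e. the
# iteration-based equivalent of the epoch-based 1x schedule, with the
# 60k/80k decays roughly mirroring epochs 8/11.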
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable`: whether to scale the LR automatically by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
| 3,754 | 29.528455 | 78 |
py
|
ERD
|
ERD-main/configs/timm_example/retinanet_timm-efficientnet-b1_fpn_1x_coco.py
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.TIMMBackbone',
model_name='efficientnet_b1',
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)),
neck=dict(in_channels=[24, 40, 112, 320]))
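# A hedged way to verify the in_channels above, assuming timm is
# installed (these are the channel dims of efficientnet_b1 at
# out_indices 1-4):
#
# import timm
# m = timm.create_model('efficientnet_b1', features_only=True)
# print(m.feature_info.channels())  # -> [16, 24, 40, 112, 320]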
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
| 725 | 29.25 | 75 |
py
|