| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
| stringlengths 2-99 | stringlengths 13-225 | stringlengths 0-18.3M | int64 0-18.3M | float64 0-1.36M | int64 0-4.26M | stringclasses 1 value |
---|---|---|---|---|---|---|
| ERD | ERD-main/configs/_base_/schedules/schedule_1x.py |
# training schedule for 1x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable`: whether automatic LR scaling is enabled by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
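# --- Editorial note (not part of the original config) ---
# The schedule above warms the LR up linearly from 0.02 * 0.001 to 0.02
# over the first 500 iterations, then multiplies it by gamma=0.1 at
# epochs 8 and 11 (0.02 -> 0.002 -> 0.0002).
# A minimal sketch of the linear scaling rule that `auto_scale_lr` applies
# when enabled; the 4-GPU batch size below is an illustrative assumption:
# base_lr = 0.02                 # optimizer LR above
# base_batch_size = 16           # 8 GPUs x 2 samples per GPU
# actual_batch_size = 8          # e.g. 4 GPUs x 2 samples per GPU
# scaled_lr = base_lr * actual_batch_size / base_batch_size  # -> 0.01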
| 814 | 27.103448 | 79 | py |
| ERD | ERD-main/configs/_base_/schedules/schedule_2x.py |
# training schedule for 2x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=24, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable`: whether automatic LR scaling is enabled by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
| 815 | 27.137931 | 79 | py |
| ERD | ERD-main/configs/_base_/datasets/semi_coco_detection.py |
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
color_space = [
[dict(type='ColorTransform')],
[dict(type='AutoContrast')],
[dict(type='Equalize')],
[dict(type='Sharpness')],
[dict(type='Posterize')],
[dict(type='Solarize')],
[dict(type='Color')],
[dict(type='Contrast')],
[dict(type='Brightness')],
]
geometric = [
[dict(type='Rotate')],
[dict(type='ShearX')],
[dict(type='ShearY')],
[dict(type='TranslateX')],
[dict(type='TranslateY')],
]
scale = [(1333, 400), (1333, 1200)]
branch_field = ['sup', 'unsup_teacher', 'unsup_student']
# pipeline used to augment labeled data,
# which will be sent to student model for supervised training.
sup_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomResize', scale=scale, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='RandAugment', aug_space=color_space, aug_num=1),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(
type='MultiBranch',
branch_field=branch_field,
sup=dict(type='PackDetInputs'))
]
# pipeline used to augment unlabeled data weakly,
# which will be sent to teacher model for predicting pseudo instances.
weak_pipeline = [
dict(type='RandomResize', scale=scale, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction',
'homography_matrix')),
]
# pipeline used to augment unlabeled data strongly,
# which will be sent to student model for unsupervised training.
strong_pipeline = [
dict(type='RandomResize', scale=scale, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomOrder',
transforms=[
dict(type='RandAugment', aug_space=color_space, aug_num=1),
dict(type='RandAugment', aug_space=geometric, aug_num=1),
]),
dict(type='RandomErasing', n_patches=(1, 5), ratio=(0, 0.2)),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction',
'homography_matrix')),
]
# pipeline used to augment unlabeled data into different views
unsup_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadEmptyAnnotations'),
dict(
type='MultiBranch',
branch_field=branch_field,
unsup_teacher=weak_pipeline,
unsup_student=strong_pipeline,
)
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
batch_size = 5
num_workers = 5
# There are two common semi-supervised learning settings on the COCO dataset:
# (1) Divide train2017 into labeled and unlabeled datasets
# by a fixed percentage, such as 1%, 2%, 5% and 10%.
# The formats of labeled_ann_file and unlabeled_ann_file are
# instances_train2017.{fold}@{percent}.json and
# instances_train2017.{fold}@{percent}-unlabeled.json, respectively.
# `fold` is used for cross-validation, and `percent` represents
# the proportion of labeled data in train2017.
# (2) Use train2017 as the labeled dataset
# and unlabeled2017 as the unlabeled dataset.
# The labeled_ann_file and unlabeled_ann_file are
# instances_train2017.json and image_info_unlabeled2017.json.
# We use this configuration by default.
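# --- Editorial sketch (not part of the original config) ---
# A hypothetical override for setting (1), training on fold 1 with a 10%
# labeled split; the `semi_anns/` directory name is an assumption here:
# labeled_dataset = dict(
#     ann_file='semi_anns/instances_train2017.1@10.json')
# unlabeled_dataset = dict(
#     ann_file='semi_anns/instances_train2017.1@10-unlabeled.json')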
labeled_dataset = dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=sup_pipeline,
backend_args=backend_args)
unlabeled_dataset = dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_unlabeled2017.json',
data_prefix=dict(img='unlabeled2017/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=unsup_pipeline,
backend_args=backend_args)
train_dataloader = dict(
batch_size=batch_size,
num_workers=num_workers,
persistent_workers=True,
sampler=dict(
type='GroupMultiSourceSampler',
batch_size=batch_size,
source_ratio=[1, 4]),
dataset=dict(
type='ConcatDataset', datasets=[labeled_dataset, unlabeled_dataset]))
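# Editorial note: with batch_size=5 and source_ratio=[1, 4], the
# GroupMultiSourceSampler above draws 5 * 1/(1+4) = 1 labeled image and
# 5 * 4/(1+4) = 4 unlabeled images per batch.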
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
| 5,916 | 32.055866 | 78 | py |
| ERD | ERD-main/configs/_base_/datasets/deepfashion.py |
# dataset settings
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(750, 1101), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(750, 1101), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_train.json',
data_prefix=dict(img='Img/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_query.json',
data_prefix=dict(img='Img/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='Anno/segmentation/DeepFashion_segmentation_gallery.json',
data_prefix=dict(img='Img/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root +
'Anno/segmentation/DeepFashion_segmentation_query.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = dict(
type='CocoMetric',
ann_file=data_root +
'Anno/segmentation/DeepFashion_segmentation_gallery.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
| 3,198 | 32.322917 | 78 | py |
| ERD | ERD-main/configs/_base_/datasets/objects365v1_detection.py |
# dataset settings
dataset_type = 'Objects365V1Dataset'
data_root = 'data/Objects365/Obj365_v1/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have GT annotations, remove this transform from the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/objects365_train.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/objects365_val.json',
data_prefix=dict(img='val/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/objects365_val.json',
metric='bbox',
sort_categories=True,
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
| 2,481 | 32.093333 | 78 | py |
| ERD | ERD-main/configs/_base_/datasets/coco_instance.py |
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have GT annotations, remove this transform from the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric=['bbox', 'segm'],
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_instance/test')
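# --- Editorial sketch (assumes mmengine is installed and this file lives
# at configs/_base_/datasets/coco_instance.py), showing how the commented
# submission settings above could be applied programmatically instead of
# editing the file by hand: ---
# from mmengine.config import Config
# cfg = Config.fromfile('configs/_base_/datasets/coco_instance.py')
# cfg.test_evaluator = dict(
#     type='CocoMetric',
#     metric=['bbox', 'segm'],
#     format_only=True,
#     ann_file=cfg.data_root + 'annotations/image_info_test-dev2017.json',
#     outfile_prefix='./work_dirs/coco_instance/test')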
| 3,238 | 32.739583 | 78 | py |
| ERD | ERD-main/configs/_base_/datasets/coco_instance_semantic.py |
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have GT annotations, remove this transform from the pipeline
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
| 2,574 | 31.594937 | 79 | py |
| ERD | ERD-main/configs/_base_/datasets/openimages_detection.py |
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
# TODO: find a better way to collect image_level_labels
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances', 'image_level_labels'))
]
train_dataloader = dict(
batch_size=2,
    num_workers=0,  # workers_per_gpu > 0 may cause out-of-memory errors
persistent_workers=False,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=0,
persistent_workers=False,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/validation-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/validation/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/validation-image-metas.pkl',
image_level_ann_file='annotations/validation-'
'annotations-human-imagelabels-boxable.csv',
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='OpenImagesMetric',
iou_thrs=0.5,
ioa_thrs=0.5,
use_group_of=True,
get_supercategory=True)
test_evaluator = val_evaluator
| 2,979 | 35.341463 | 78 | py |
| ERD | ERD-main/configs/_base_/datasets/objects365v2_detection.py |
# dataset settings
dataset_type = 'Objects365V2Dataset'
data_root = 'data/Objects365/Obj365_v2/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have GT annotations, remove this transform from the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/zhiyuan_objv2_train.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/zhiyuan_objv2_val.json',
data_prefix=dict(img='val/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/zhiyuan_objv2_val.json',
metric='bbox',
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
| 2,464 | 32.310811 | 78 | py |
| ERD | ERD-main/configs/_base_/datasets/cityscapes_detection.py |
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/segmentation/',
# 'data/': 's3://openmmlab/datasets/segmentation/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have GT annotations, remove this transform from the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric='bbox',
backend_args=backend_args)
test_evaluator = val_evaluator
| 2,729 | 31.117647 | 78 | py |
| ERD | ERD-main/configs/_base_/datasets/voc0712.py |
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/segmentation/VOCdevkit/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/segmentation/',
# 'data/': 's3://openmmlab/datasets/segmentation/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type='ConcatDataset',
        # VOCDataset adds a different `dataset_type` to dataset.metainfo,
        # which raises an error when using ConcatDataset. Adding
        # `ignore_keys` avoids this error (see the note after this
        # dataloader).
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline,
backend_args=backend_args),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)
])))
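# Editorial note on `ignore_keys` above: ConcatDataset verifies that every
# sub-dataset reports the same metainfo; VOC2007 and VOC2012 store
# different `dataset_type` values there, so that check would fail.
# `ignore_keys=['dataset_type']` excludes the field from the comparison.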
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/test.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
# Pascal VOC2007 uses `11points` as its default evaluation mode, while
# PASCAL VOC2012 defaults to 'area'.
val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')
test_evaluator = val_evaluator
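# A hypothetical override (editorial, not in the original config) for the
# VOC2012-style evaluation mode mentioned above:
# val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='area')
# test_evaluator = val_evaluator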
| 3,434 | 35.935484 | 79 | py |
| ERD | ERD-main/configs/_base_/datasets/lvis_v1_instance.py |
# dataset settings
_base_ = 'lvis_v0.5_instance.py'
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
train_dataloader = dict(
dataset=dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_train.json',
data_prefix=dict(img=''))))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_val.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(ann_file=data_root + 'annotations/lvis_v1_val.json')
test_evaluator = val_evaluator
| 656 | 27.565217 | 73 | py |
| ERD | ERD-main/configs/_base_/datasets/cityscapes_instance.py |
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/segmentation/',
# 'data/': 's3://openmmlab/datasets/segmentation/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have GT annotations, remove this transform from the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = [
dict(
type='CocoMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm'],
backend_args=backend_args),
dict(
type='CityScapesMetric',
seg_prefix=data_root + 'gtFine/val',
outfile_prefix='./work_dirs/cityscapes_metric/instance',
backend_args=backend_args)
]
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/instancesonly_filtered_gtFine_test.json',
# data_prefix=dict(img='leftImg8bit/test/'),
# test_mode=True,
# filter_cfg=dict(filter_empty_gt=True, min_size=32),
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CityScapesMetric',
# format_only=True,
# outfile_prefix='./work_dirs/cityscapes_metric/test')
| 3,725 | 31.684211 | 76 | py |
| ERD | ERD-main/configs/_base_/datasets/coco_detection.py |
# dataset settings
dataset_type = 'CocoDataset'
data_root = '../data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have GT annotations, remove this transform from the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric='bbox',
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_detection/test')
| 3,190 | 32.239583 | 78 | py |
| ERD | ERD-main/configs/_base_/datasets/lvis_v0.5_instance.py |
# dataset settings
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/lvis_v0.5/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v0.5_train.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v0.5_val.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='LVISMetric',
ann_file=data_root + 'annotations/lvis_v0.5_val.json',
metric=['bbox', 'segm'],
backend_args=backend_args)
test_evaluator = val_evaluator
| 2,646 | 32.0875 | 78 | py |
| ERD | ERD-main/configs/_base_/datasets/coco_panoptic.py |
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_panoptic/test')
| 3,277 | 33.505263 | 79 | py |
| ERD | ERD-main/configs/_base_/datasets/wider_face.py |
# dataset settings
dataset_type = 'WIDERFaceDataset'
data_root = 'data/WIDERFace/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/cityscapes/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
img_scale = (640, 640) # VGA resolution
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='train.txt',
data_prefix=dict(img='WIDER_train'),
filter_cfg=dict(filter_empty_gt=True, bbox_min_size=17, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='val.txt',
data_prefix=dict(img='WIDER_val'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
# TODO: support WiderFace-Evaluation for easy, medium, hard cases
type='VOCMetric',
metric='mAP',
eval_mode='11points')
test_evaluator = val_evaluator
| 2,338 | 30.608108 | 78 | py |
| ERD | ERD-main/configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py |
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
dict(
type='BFP',
in_channels=256,
num_levels=5,
refine_level=2,
refine_type='non_local')
],
roi_head=dict(
bbox_head=dict(
loss_bbox=dict(
_delete_=True,
type='BalancedL1Loss',
alpha=0.5,
gamma=1.5,
beta=1.0,
loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(sampler=dict(neg_pos_ub=5), allowed_border=-1),
rcnn=dict(
sampler=dict(
_delete_=True,
type='CombinedSampler',
num=512,
pos_fraction=0.25,
add_gt_as_proposals=True,
pos_sampler=dict(type='InstanceBalancedPosSampler'),
neg_sampler=dict(
type='IoUBalancedNegSampler',
floor_thr=-1,
floor_fraction=0,
num_bins=3)))))
| 1,268 | 29.214286 | 68 | py |
| ERD | ERD-main/configs/libra_rcnn/libra-faster-rcnn_r101_fpn_1x_coco.py |
_base_ = './libra-faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 205 | 28.428571 | 61 | py |
| ERD | ERD-main/configs/libra_rcnn/libra-faster-rcnn_x101-64x4d_fpn_1x_coco.py |
_base_ = './libra-faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 427 | 27.533333 | 76 | py |
| ERD | ERD-main/configs/libra_rcnn/libra-retinanet_r50_fpn_1x_coco.py |
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
# model settings
model = dict(
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5),
dict(
type='BFP',
in_channels=256,
num_levels=5,
refine_level=1,
refine_type='non_local')
],
bbox_head=dict(
loss_bbox=dict(
_delete_=True,
type='BalancedL1Loss',
alpha=0.5,
gamma=1.5,
beta=0.11,
loss_weight=1.0)))
| 674 | 24 | 52 | py |
| ERD | ERD-main/configs/libra_rcnn/libra-fast-rcnn_r50_fpn_1x_coco.py |
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
dict(
type='BFP',
in_channels=256,
num_levels=5,
refine_level=2,
refine_type='non_local')
],
roi_head=dict(
bbox_head=dict(
loss_bbox=dict(
_delete_=True,
type='BalancedL1Loss',
alpha=0.5,
gamma=1.5,
beta=1.0,
loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
sampler=dict(
_delete_=True,
type='CombinedSampler',
num=512,
pos_fraction=0.25,
add_gt_as_proposals=True,
pos_sampler=dict(type='InstanceBalancedPosSampler'),
neg_sampler=dict(
type='IoUBalancedNegSampler',
floor_thr=-1,
floor_fraction=0,
num_bins=3)))))
# MMEngine supports the following two ways; users can choose
# whichever is more convenient
# _base_.train_dataloader.dataset.proposal_file = 'libra_proposals/rpn_r50_fpn_1x_train2017.pkl' # noqa
train_dataloader = dict(
dataset=dict(proposal_file='libra_proposals/rpn_r50_fpn_1x_train2017.pkl'))
# _base_.val_dataloader.dataset.proposal_file = 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl' # noqa
# test_dataloader = _base_.val_dataloader
val_dataloader = dict(
dataset=dict(proposal_file='libra_proposals/rpn_r50_fpn_1x_val2017.pkl'))
test_dataloader = val_dataloader
| 1,775 | 32.509434 | 104 | py |
| ERD | ERD-main/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py |
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 4, 6, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b2.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
| 311 | 33.666667 | 66 | py |
| ERD | ERD-main/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py |
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b1.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
| 278 | 33.875 | 66 | py |
| ERD | ERD-main/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py |
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py'
model = dict(
backbone=dict(
num_layers=[3, 4, 18, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_medium.pth')))
| 239 | 33.285714 | 66 | py |
| ERD | ERD-main/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py |
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 8, 27, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b4.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optim_wrapper = dict(
optimizer=dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001))
# dataset settings
train_dataloader = dict(batch_size=1, num_workers=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 sample per GPU)
auto_scale_lr = dict(base_batch_size=8)
| 701 | 32.428571 | 75 | py |
| ERD | ERD-main/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='RetinaNet',
backbone=dict(
_delete_=True,
type='PyramidVisionTransformer',
num_layers=[2, 2, 2, 2],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_tiny.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optim_wrapper = dict(
optimizer=dict(
_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001))
| 627 | 32.052632 | 72 | py |
| ERD | ERD-main/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py |
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py'
model = dict(
backbone=dict(
num_layers=[3, 4, 6, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_small.pth')))
| 237 | 33 | 66 | py |
| ERD | ERD-main/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py |
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 4, 18, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b3.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
| 312 | 33.777778 | 66 | py |
| ERD | ERD-main/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py |
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py'
model = dict(
backbone=dict(
num_layers=[3, 8, 27, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_large.pth')))
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
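# Editorial note: AmpOptimWrapper wraps the AdamW optimizer inherited from
# retinanet_pvt-t_fpn_1x_coco.py and runs the forward/backward passes in
# mixed precision via torch.cuda.amp, with dynamic loss scaling by default.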
| 349 | 37.888889 | 66 | py |
| ERD | ERD-main/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py |
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='RetinaNet',
backbone=dict(
_delete_=True,
type='PyramidVisionTransformerV2',
embed_dims=32,
num_layers=[2, 2, 2, 2],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b0.pth')),
neck=dict(in_channels=[32, 64, 160, 256]))
# optimizer
optim_wrapper = dict(
optimizer=dict(
_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001))
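# Editorial note: `_delete_=True` tells the MMEngine config merger to drop
# the inherited dict entirely instead of merging new keys into it. A minimal
# sketch of the effect, with hypothetical dicts:
# base  = dict(backbone=dict(type='ResNet', depth=50))
# child = dict(backbone=dict(_delete_=True, type='PyramidVisionTransformerV2'))
# merged backbone -> dict(type='PyramidVisionTransformerV2')  # no ResNet keys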
| 652 | 31.65 | 72 | py |
| ERD | ERD-main/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py |
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 6, 40, 3],
mlp_ratios=(4, 4, 4, 4),
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b5.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optim_wrapper = dict(
optimizer=dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001))
# dataset settings
train_dataloader = dict(batch_size=1, num_workers=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 sample per GPU)
auto_scale_lr = dict(base_batch_size=8)
| 734 | 32.409091 | 75 |
py
|
ERD
|
ERD-main/configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=image_size),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
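# Editorial note: `{{_base_.backend_args}}` above is MMEngine's syntax for
# referencing a variable defined in one of the `_base_` files; it resolves
# to the `backend_args` value from coco_detection.py when the config is
# parsed.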
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=4, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.04),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# cudnn_benchmark=True can accelerate fixed-size training
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)
| 3,148 | 32.147368 | 147 | py |
| ERD | ERD-main/configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py |
# We follow the original implementation which
# adopts the Caffe pre-trained backbone.
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='AutoAssign',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
relu_before_extra_convs=True,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')),
bbox_head=dict(
type='AutoAssignHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_bbox=dict(type='GIoULoss', loss_weight=5.0)),
train_cfg=None,
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01), paramwise_cfg=dict(norm_decay_mult=0.))
| 1,923 | 26.485714 | 72 | py |
| ERD | ERD-main/configs/gfl_increment/gfl_r50_fpn_1x_coco_first_40_incre_last_40_cats.py |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = '../data/coco/'
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have GT annotations, remove this transform from the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
model = dict(
type='GFLIncrementERD',
ori_setting=dict( # ============> for increment <==============
        ori_checkpoint_file='../ERD_results/gfl_increment/'
        'gfl_r50_fpn_1x_coco_first_40_cats/epoch_12.pth',  # *********************
ori_num_classes=40, # *********************
ori_config_file='configs/gfl_increment/gfl_r50_fpn_1x_coco_first_40_cats.py'),
latest_model_flag=True,
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='GFLHeadIncrementERD',
num_classes=80, # *********************
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
loss_ld=dict(type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10),
reg_max=16,
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(
dataset=dict(
data_root=data_root,
ann_file='annotations/instances_train2017_sel_last_40_cats.json',
data_prefix=dict(img='train2017/')))
val_dataloader = dict(
dataset=dict(
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/')))
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json')
test_dataloader = val_dataloader
test_evaluator = val_evaluator
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
auto_scale_lr = dict(enable=True, base_batch_size=16)
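# Standalone sketch (an assumption about the workflow, not the repo's exact
# code): ori_setting above implies a two-stage pipeline in which the model
# trained on the first 40 classes is rebuilt and loaded as a frozen teacher
# before incremental training on the remaining 40 classes.
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules

register_all_modules()
ori_cfg = Config.fromfile(
    'configs/gfl_increment/gfl_r50_fpn_1x_coco_first_40_cats.py')
teacher = MODELS.build(ori_cfg.model)
load_checkpoint(
    teacher,
    '../ERD_results/gfl_increment/gfl_r50_fpn_1x_coco_first_40_cats/epoch_12.pth',
    map_location='cpu')
teacher.eval()  # the teacher stays frozen while the student learns new classes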
ERD-main/configs/gfl_increment/gfl_r50_fpn_1x_coco_first_40_cats.py
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = '../data/coco/'
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have ground-truth annotations, remove this transform
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
model = dict(
type='GFL',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='GFLHead',
num_classes=40, # *********************
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
reg_max=16,
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(
dataset=dict(
data_root=data_root,
ann_file='annotations/instances_train2017_sel_first_40_cats.json',
data_prefix=dict(img='train2017/')))
val_dataloader = dict(
dataset=dict(
data_root=data_root,
ann_file='annotations/instances_val2017_sel_first_40_cats.json',
data_prefix=dict(img='val2017/')))
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017_sel_first_40_cats.json')
test_dataloader = val_dataloader
test_evaluator = val_evaluator
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
auto_scale_lr = dict(enable=True, base_batch_size=16)
ERD-main/configs/retinanet/retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../common/lsj-200e_coco-detection.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(data_preprocessor=dict(batch_augments=batch_augments))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
ERD-main/configs/retinanet/retinanet_r50-caffe_fpn_ms-3x_coco.py
_base_ = './retinanet_r50-caffe_fpn_ms-1x_coco.py'
# training schedule for 3x
train_cfg = dict(max_epochs=36)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
ERD-main/configs/retinanet/retinanet_tta.py
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[[
dict(type='Resize', scale=s, keep_ratio=True) for s in img_scales
], [
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
], [dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape',
'img_shape', 'scale_factor', 'flip',
'flip_direction'))
]])
]
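# Sketch of what TestTimeAug does with the lists above: it takes the Cartesian
# product of the transform groups, so 3 scales x 2 flip settings yield 6
# augmented views per image, whose detections are merged by DetTTAModel's NMS.
from itertools import product

img_scales = [(1333, 800), (666, 400), (2000, 1200)]
flip_probs = [1.0, 0.0]  # one flipped and one unflipped pass
views = list(product(img_scales, flip_probs))
assert len(views) == 6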
ERD-main/configs/retinanet/retinanet_r50_fpn_2x_coco.py
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
ERD-main/configs/retinanet/retinanet_r50_fpn_90k_coco.py
_base_ = 'retinanet_r50_fpn_1x_coco.py'
# training schedule for 90k
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=90000,
val_interval=10000)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=90000,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
train_dataloader = dict(sampler=dict(type='InfiniteSampler'))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
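# Rough iteration-to-epoch arithmetic for the 90k schedule above (a sketch;
# the dataset size and total batch size are assumptions for illustration).
num_images = 118287          # approx. size of COCO train2017
total_batch = 16             # e.g. 8 GPUs x 2 samples per GPU
iters_per_epoch = num_images / total_batch   # ~7393 iterations
epochs = 90000 / iters_per_epoch             # ~12.2, close to the 1x schedule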
ERD-main/configs/retinanet/retinanet_r50-caffe_fpn_ms-2x_coco.py
_base_ = './retinanet_r50-caffe_fpn_ms-1x_coco.py'
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
ERD-main/configs/retinanet/retinanet_r101_fpn_ms-640-800-3x_coco.py
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
ERD-main/configs/retinanet/retinanet_r50-caffe_fpn_ms-1x_coco.py
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
ERD-main/configs/retinanet/retinanet_r50_fpn_ms-640-800-3x_coco.py
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
ERD-main/configs/retinanet/retinanet_r101_fpn_8xb8-amp-lsj-200e_coco.py
_base_ = './retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
ERD-main/configs/retinanet/retinanet_x101-64x4d_fpn_ms-640-800-3x_coco.py
_base_ = ['../_base_/models/retinanet_r50_fpn.py', '../common/ms_3x_coco.py']
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# optimizer
optim_wrapper = dict(optimizer=dict(type='SGD', lr=0.01))
ERD-main/configs/retinanet/retinanet_x101-64x4d_fpn_2x_coco.py
_base_ = './retinanet_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
ERD-main/configs/retinanet/retinanet_r18_fpn_1x_coco.py
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# TODO: support auto scaling lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
# auto_scale_lr = dict(base_batch_size=16)
ERD-main/configs/retinanet/retinanet_r50_fpn_amp-1x_coco.py
_base_ = './retinanet_r50_fpn_1x_coco.py'
# MMEngine supports the following two ways; users can choose
# according to convenience.
# optim_wrapper = dict(type='AmpOptimWrapper')
_base_.optim_wrapper.type = 'AmpOptimWrapper'
ERD-main/configs/retinanet/retinanet_r101-caffe_fpn_1x_coco.py
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
ERD-main/configs/retinanet/retinanet_x101-64x4d_fpn_1x_coco.py
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
ERD-main/configs/retinanet/retinanet_r101_fpn_2x_coco.py
_base_ = './retinanet_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
ERD-main/configs/retinanet/retinanet_x101-32x4d_fpn_2x_coco.py
_base_ = './retinanet_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
ERD-main/configs/retinanet/retinanet_r101-caffe_fpn_ms-3x_coco.py
_base_ = './retinanet_r50-caffe_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
ERD-main/configs/retinanet/retinanet_x101-32x4d_fpn_1x_coco.py
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
ERD-main/configs/retinanet/retinanet_r18_fpn_1xb8-1x_coco.py
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# data
train_dataloader = dict(batch_size=8)
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
# Note: If the learning rate is set to 0.0025, the mAP will be 32.4.
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001))
# TODO: support auto scaling lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (1 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=8)
ERD-main/configs/retinanet/retinanet_r18_fpn_8xb8-amp-lsj-200e_coco.py
_base_ = './retinanet_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
ERD-main/configs/retinanet/retinanet_r101_fpn_1x_coco.py
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
ERD-main/configs/retinanet/retinanet_r50_fpn_1x_coco.py
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',
'./retinanet_tta.py'
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
ERD-main/configs/retinanet/retinanet_r50-caffe_fpn_1x_coco.py
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
# use caffe img_norm
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
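# Sketch of what the caffe-style preprocessing above amounts to for a single
# image: BGR channel order and per-channel mean subtraction with std 1.0,
# i.e. no rescaling. The dummy image below is an assumption for illustration.
import numpy as np

mean = np.array([103.530, 116.280, 123.675])          # BGR means from the config
img_bgr = np.zeros((800, 1333, 3), dtype=np.float32)  # dummy BGR image
img_norm = (img_bgr - mean) / np.array([1.0, 1.0, 1.0])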
ERD-main/configs/misc/d2_mask-rcnn_r50-caffe_fpn_ms-90k_coco.py
_base_ = '../common/ms-poly-90k_coco-instance.py'
# model settings
model = dict(
type='Detectron2Wrapper',
bgr_to_rgb=False,
detector=dict(
        # The settings in `detector` will be merged into the default settings
        # of detectron2. For more details, please refer to
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/config/defaults.py # noqa
meta_architecture='GeneralizedRCNN',
# If you want to finetune the detector, you can use the
# checkpoint released by detectron2, for example:
# weights='detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/model_final_a54504.pkl' # noqa
weights='detectron2://ImageNetPretrained/MSRA/R-50.pkl',
mask_on=True,
pixel_mean=[103.530, 116.280, 123.675],
pixel_std=[1.0, 1.0, 1.0],
backbone=dict(name='build_resnet_fpn_backbone', freeze_at=2),
resnets=dict(
depth=50,
out_features=['res2', 'res3', 'res4', 'res5'],
num_groups=1,
norm='FrozenBN'),
fpn=dict(
in_features=['res2', 'res3', 'res4', 'res5'], out_channels=256),
anchor_generator=dict(
name='DefaultAnchorGenerator',
sizes=[[32], [64], [128], [256], [512]],
aspect_ratios=[[0.5, 1.0, 2.0]],
angles=[[-90, 0, 90]]),
proposal_generator=dict(name='RPN'),
rpn=dict(
head_name='StandardRPNHead',
in_features=['p2', 'p3', 'p4', 'p5', 'p6'],
iou_thresholds=[0.3, 0.7],
iou_labels=[0, -1, 1],
batch_size_per_image=256,
positive_fraction=0.5,
bbox_reg_loss_type='smooth_l1',
bbox_reg_loss_weight=1.0,
bbox_reg_weights=(1.0, 1.0, 1.0, 1.0),
smooth_l1_beta=0.0,
loss_weight=1.0,
boundary_thresh=-1,
pre_nms_topk_train=2000,
post_nms_topk_train=1000,
pre_nms_topk_test=1000,
post_nms_topk_test=1000,
nms_thresh=0.7,
conv_dims=[-1]),
roi_heads=dict(
name='StandardROIHeads',
num_classes=80,
in_features=['p2', 'p3', 'p4', 'p5'],
iou_thresholds=[0.5],
iou_labels=[0, 1],
batch_size_per_image=512,
positive_fraction=0.25,
score_thresh_test=0.05,
nms_thresh_test=0.5,
proposal_append_gt=True),
roi_box_head=dict(
name='FastRCNNConvFCHead',
num_fc=2,
fc_dim=1024,
conv_dim=256,
pooler_type='ROIAlignV2',
pooler_resolution=7,
pooler_sampling_ratio=0,
bbox_reg_loss_type='smooth_l1',
bbox_reg_loss_weight=1.0,
bbox_reg_weights=(10.0, 10.0, 5.0, 5.0),
smooth_l1_beta=0.0,
cls_agnostic_bbox_reg=False),
roi_mask_head=dict(
name='MaskRCNNConvUpsampleHead',
conv_dim=256,
num_conv=4,
pooler_type='ROIAlignV2',
pooler_resolution=14,
pooler_sampling_ratio=0,
cls_agnostic_mask=False)))
ERD-main/configs/misc/d2_faster-rcnn_r50-caffe_fpn_ms-90k_coco.py
_base_ = '../common/ms-90k_coco.py'
# model settings
model = dict(
type='Detectron2Wrapper',
bgr_to_rgb=False,
detector=dict(
        # The settings in `detector` will be merged into the default settings
        # of detectron2. For more details, please refer to
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/config/defaults.py # noqa
meta_architecture='GeneralizedRCNN',
# If you want to finetune the detector, you can use the
# checkpoint released by detectron2, for example:
# weights='detectron2://COCO-Detection/faster_rcnn_R_50_FPN_1x/137257794/model_final_b275ba.pkl' # noqa
weights='detectron2://ImageNetPretrained/MSRA/R-50.pkl',
mask_on=False,
pixel_mean=[103.530, 116.280, 123.675],
pixel_std=[1.0, 1.0, 1.0],
backbone=dict(name='build_resnet_fpn_backbone', freeze_at=2),
resnets=dict(
depth=50,
out_features=['res2', 'res3', 'res4', 'res5'],
num_groups=1,
norm='FrozenBN'),
fpn=dict(
in_features=['res2', 'res3', 'res4', 'res5'], out_channels=256),
anchor_generator=dict(
name='DefaultAnchorGenerator',
sizes=[[32], [64], [128], [256], [512]],
aspect_ratios=[[0.5, 1.0, 2.0]],
angles=[[-90, 0, 90]]),
proposal_generator=dict(name='RPN'),
rpn=dict(
head_name='StandardRPNHead',
in_features=['p2', 'p3', 'p4', 'p5', 'p6'],
iou_thresholds=[0.3, 0.7],
iou_labels=[0, -1, 1],
batch_size_per_image=256,
positive_fraction=0.5,
bbox_reg_loss_type='smooth_l1',
bbox_reg_loss_weight=1.0,
bbox_reg_weights=(1.0, 1.0, 1.0, 1.0),
smooth_l1_beta=0.0,
loss_weight=1.0,
boundary_thresh=-1,
pre_nms_topk_train=2000,
post_nms_topk_train=1000,
pre_nms_topk_test=1000,
post_nms_topk_test=1000,
nms_thresh=0.7,
conv_dims=[-1]),
roi_heads=dict(
name='StandardROIHeads',
num_classes=80,
in_features=['p2', 'p3', 'p4', 'p5'],
iou_thresholds=[0.5],
iou_labels=[0, 1],
batch_size_per_image=512,
positive_fraction=0.25,
score_thresh_test=0.05,
nms_thresh_test=0.5,
proposal_append_gt=True),
roi_box_head=dict(
name='FastRCNNConvFCHead',
num_fc=2,
fc_dim=1024,
conv_dim=256,
pooler_type='ROIAlignV2',
pooler_resolution=7,
pooler_sampling_ratio=0,
bbox_reg_loss_type='smooth_l1',
bbox_reg_loss_weight=1.0,
bbox_reg_weights=(10.0, 10.0, 5.0, 5.0),
smooth_l1_beta=0.0,
cls_agnostic_bbox_reg=False)))
ERD-main/configs/misc/d2_retinanet_r50-caffe_fpn_ms-90k_coco.py
_base_ = '../common/ms-90k_coco.py'
# model settings
model = dict(
type='Detectron2Wrapper',
bgr_to_rgb=False,
detector=dict(
        # The settings in `detector` will be merged into the default settings
        # of detectron2. For more details, please refer to
# https://github.com/facebookresearch/detectron2/blob/main/detectron2/config/defaults.py # noqa
meta_architecture='RetinaNet',
# If you want to finetune the detector, you can use the
# checkpoint released by detectron2, for example:
# weights='detectron2://COCO-Detection/retinanet_R_50_FPN_1x/190397773/model_final_bfca0b.pkl' # noqa
weights='detectron2://ImageNetPretrained/MSRA/R-50.pkl',
mask_on=False,
pixel_mean=[103.530, 116.280, 123.675],
pixel_std=[1.0, 1.0, 1.0],
backbone=dict(name='build_retinanet_resnet_fpn_backbone', freeze_at=2),
resnets=dict(
depth=50,
out_features=['res3', 'res4', 'res5'],
num_groups=1,
norm='FrozenBN'),
fpn=dict(in_features=['res3', 'res4', 'res5'], out_channels=256),
anchor_generator=dict(
name='DefaultAnchorGenerator',
sizes=[[x, x * 2**(1.0 / 3), x * 2**(2.0 / 3)]
for x in [32, 64, 128, 256, 512]],
aspect_ratios=[[0.5, 1.0, 2.0]],
angles=[[-90, 0, 90]]),
retinanet=dict(
num_classes=80,
in_features=['p3', 'p4', 'p5', 'p6', 'p7'],
num_convs=4,
iou_thresholds=[0.4, 0.5],
iou_labels=[0, -1, 1],
bbox_reg_weights=(1.0, 1.0, 1.0, 1.0),
bbox_reg_loss_type='smooth_l1',
smooth_l1_loss_beta=0.0,
focal_loss_gamma=2.0,
focal_loss_alpha=0.25,
prior_prob=0.01,
score_thresh_test=0.05,
topk_candidates_test=1000,
nms_thresh_test=0.5)))
optim_wrapper = dict(optimizer=dict(lr=0.01))
ERD-main/configs/free_anchor/freeanchor_x101-32x4d_fpn_1x_coco.py
_base_ = './freeanchor_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
ERD-main/configs/free_anchor/freeanchor_r101_fpn_1x_coco.py
_base_ = './freeanchor_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
ERD-main/configs/free_anchor/freeanchor_r50_fpn_1x_coco.py
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='FreeAnchorRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.75)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
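# The clip_grad setting above corresponds roughly to the following PyTorch
# call, which the optimizer wrapper applies after each backward pass (a sketch
# with dummy parameters, not mmengine's actual internals).
import torch

params = [torch.nn.Parameter(torch.randn(10))]
loss = (params[0] ** 2).sum()
loss.backward()
torch.nn.utils.clip_grad_norm_(params, max_norm=35, norm_type=2)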
ERD-main/configs/scratch/faster-rcnn_r50-scratch_fpn_gn-all_6x_coco.py
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
frozen_stages=-1,
zero_init_residual=False,
norm_cfg=norm_cfg,
init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg)))
optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.))
max_epochs = 73
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[65, 71],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
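# Sketch of the norm choice above: training from scratch (frozen_stages=-1,
# no init_cfg) pairs with GroupNorm, which normalizes over channel groups and
# is independent of batch statistics, unlike the frozen BN typically used with
# pre-trained backbones. The tensor shapes below are illustrative.
import torch

gn = torch.nn.GroupNorm(num_groups=32, num_channels=256)
x = torch.randn(2, 256, 8, 8)
y = gn(x)  # same normalization behavior regardless of batch size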
ERD-main/configs/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
frozen_stages=-1,
zero_init_residual=False,
norm_cfg=norm_cfg,
init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.))
max_epochs = 73
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[65, 71],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_ms-3x_coco.py
_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py']
ERD-main/configs/faster_rcnn/faster-rcnn_x101-32x4d_fpn_1x_coco.py
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
ERD-main/configs/faster_rcnn/faster-rcnn_x101-64x4d_fpn_ms-3x_coco.py
_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py']
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
ERD-main/configs/faster_rcnn/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_coco.py
_base_ = './faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_bounded-iou_1x_coco.py
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='BoundedIoULoss', loss_weight=10.0))))
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_soft-nms_1x_coco.py
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
test_cfg=dict(
rcnn=dict(
score_thr=0.05,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100)))
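# Sketch of the idea behind soft_nms above: instead of discarding a box that
# overlaps a higher-scoring one, its score is decayed by the overlap. This is
# the linear variant for illustration; mmcv also implements a gaussian decay.
def soft_nms_decay(score: float, iou: float, iou_threshold: float = 0.5) -> float:
    return score * (1.0 - iou) if iou > iou_threshold else score

assert soft_nms_decay(0.9, 0.6) == 0.9 * 0.4  # decayed, not removed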
ERD-main/configs/faster_rcnn/faster-rcnn_r101_fpn_ms-3x_coco.py
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
ERD-main/configs/faster_rcnn/faster-rcnn_x101-32x4d_fpn_ms-3x_coco.py
_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py']
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
ERD-main/configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_ms-1x_coco.py
_base_ = 'faster-rcnn_r50-caffe-dc5_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
_base_.train_dataloader.dataset.pipeline = train_pipeline
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_ciou_1x_coco.py
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='CIoULoss', loss_weight=12.0))))
ERD-main/configs/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco-person.py
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
metainfo = {
'classes': ('person', ),
'palette': [
(220, 20, 60),
]
}
train_dataloader = dict(dataset=dict(metainfo=metainfo))
val_dataloader = dict(dataset=dict(metainfo=metainfo))
test_dataloader = dict(dataset=dict(metainfo=metainfo))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa
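# Sanity-check sketch (illustrative, not part of the original config): the
# single-class head above must agree with the metainfo, otherwise evaluation
# class ids would be misaligned.
metainfo = {'classes': ('person', ), 'palette': [(220, 20, 60)]}
assert len(metainfo['classes']) == 1  # matches bbox_head.num_classes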
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_iou_1x_coco.py
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='IoULoss', loss_weight=10.0))))
ERD-main/configs/faster_rcnn/faster-rcnn_x101-32x8d_fpn_ms-3x_coco.py
_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py']
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_ohem_1x_coco.py
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(train_cfg=dict(rcnn=dict(sampler=dict(type='OHEMSampler'))))
ERD-main/configs/faster_rcnn/faster-rcnn_r101_fpn_8xb8-amp-lsj-200e_coco.py
_base_ = './faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
ERD-main/configs/faster_rcnn/faster-rcnn_r50-caffe_fpn_1x_coco.py
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
ERD-main/configs/faster_rcnn/faster-rcnn_r101-caffe_fpn_1x_coco.py
_base_ = './faster-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
ERD-main/configs/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-90k_coco.py
_base_ = 'faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
max_iter = 90000
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=max_iter,
val_interval=10000)
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
ERD-main/configs/faster_rcnn/faster-rcnn_r50-caffe-dc5_1x_coco.py
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_8xb8-amp-lsj-200e_coco.py
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../common/lsj-200e_coco-detection.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(data_preprocessor=dict(batch_augments=batch_augments))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.02 * 4, momentum=0.9, weight_decay=0.00004))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
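# Sketch of the LR arithmetic above: the reference 1x schedule uses lr=0.02 at
# a total batch of 16, so growing the batch to 8 GPUs x 8 samples = 64 scales
# the LR by 4 under the linear scaling rule.
reference_lr, reference_batch = 0.02, 16
total_batch = 8 * 8
lr = reference_lr * total_batch / reference_batch  # 0.08 == 0.02 * 4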
ERD-main/configs/faster_rcnn/faster-rcnn_r101_fpn_2x_coco.py
_base_ = './faster-rcnn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
ERD-main/configs/faster_rcnn/faster-rcnn_x101-64x4d_fpn_1x_coco.py
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
ERD-main/configs/faster_rcnn/faster-rcnn_r101-caffe_fpn_ms-3x_coco.py
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_giou_1x_coco.py
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))))
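# Sketch of GIoU for axis-aligned (x1, y1, x2, y2) boxes, matching the loss
# family selected above (illustrative, not mmdet's actual implementation).
def giou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    hull = (max(a[2], b[2]) - min(a[0], b[0])) * (max(a[3], b[3]) - min(a[1], b[1]))
    return inter / union - (hull - union) / hull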
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_fcos-rpn_1x_coco.py
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
# copied from configs/fcos/fcos_r50-caffe_fpn_gn-head_1x_coco.py
neck=dict(
start_level=1,
add_extra_convs='on_output', # use P5
relu_before_extra_convs=True),
rpn_head=dict(
_delete_=True, # ignore the unused old settings
type='FCOSHead',
        # num_classes must be 1 for an RPN head; if it is set > 1,
        # TwoStageDetector resets it to 1 automatically
num_classes=1,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
roi_head=dict( # update featmap_strides
bbox_roi_extractor=dict(featmap_strides=[8, 16, 32, 64, 128])))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
        end=1000),  # slowly increase the lr, otherwise the loss becomes NaN
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
ERD-main/configs/faster_rcnn/faster-rcnn_r50-caffe-c4_ms-1x_coco.py
_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
_base_.train_dataloader.dataset.pipeline = train_pipeline
ERD-main/configs/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-2x_coco.py
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# MMEngine supports the following two ways; users can choose
# according to convenience.
# param_scheduler = [
# dict(
# type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), # noqa
# dict(
# type='MultiStepLR',
# begin=0,
# end=12,
# by_epoch=True,
# milestones=[16, 23],
# gamma=0.1)
# ]
_base_.param_scheduler[1].milestones = [16, 23]
train_cfg = dict(max_epochs=24)
ERD-main/configs/faster_rcnn/faster-rcnn_r50-caffe_c4-1x_coco.py
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
ERD-main/configs/faster_rcnn/faster-rcnn_r50_fpn_2x_coco.py
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
ERD-main/configs/faster_rcnn/faster-rcnn_r50-caffe_fpn_ms-1x_coco.py
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
# MMEngine supports the following two ways; users can choose
# according to convenience.
# train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
_base_.train_dataloader.dataset.pipeline = train_pipeline