repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses 1, value: py)
---|---|---|---|---|---|---|
ERD
|
ERD-main/configs/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py
|
_base_ = './mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 226 | 31.428571 | 70 |
py
|
ERD
|
ERD-main/configs/seesaw_loss/mask-rcnn_r50_fpn_seesaw-loss-normed-mask_random-ms-2x_lvis-v1.py
|
_base_ = './mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py'
model = dict(
roi_head=dict(
mask_head=dict(
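            # 'tempearture' (sic) matches the argument name in mmdet's NormedConv2d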
predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
| 195 | 31.666667 | 70 |
py
|
ERD
|
ERD-main/configs/seesaw_loss/mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py
|
_base_ = './mask-rcnn_r50_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py' # noqa: E501
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 252 | 35.142857 | 96 |
py
|
ERD
|
ERD-main/configs/seesaw_loss/cascade-mask-rcnn_r101_fpn_seesaw-loss-normed-mask_sample1e-3-ms-2x_lvis-v1.py
|
_base_ = './cascade-mask-rcnn_r101_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py' # noqa: E501
model = dict(
roi_head=dict(
mask_head=dict(
predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
| 222 | 36.166667 | 93 |
py
|
ERD
|
ERD-main/configs/tood/tood_r101_fpn_ms-2x_coco.py
|
_base_ = './tood_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 196 | 23.625 | 61 |
py
|
ERD
|
ERD-main/configs/tood/tood_x101-64x4d_fpn_ms-2x_coco.py
|
_base_ = './tood_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 442 | 25.058824 | 76 |
py
|
ERD
|
ERD-main/configs/tood/tood_r50_fpn_anchor-based_1x_coco.py
|
_base_ = './tood_r50_fpn_1x_coco.py'
model = dict(bbox_head=dict(anchor_type='anchor_based'))
| 94 | 30.666667 | 56 |
py
|
ERD
|
ERD-main/configs/tood/tood_x101-64x4d-dconv-c4-c5_fpn_ms-2x_coco.py
|
_base_ = './tood_x101-64x4d_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, False, True, True),
),
bbox_head=dict(num_dcn=2))
| 248 | 30.125 | 78 |
py
|
ERD
|
ERD-main/configs/tood/tood_r101-dconv-c3-c5_fpn_ms-2x_coco.py
|
_base_ = './tood_r101_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
bbox_head=dict(num_dcn=2))
| 236 | 28.625 | 78 |
py
|
ERD
|
ERD-main/configs/tood/tood_r50_fpn_ms-2x_coco.py
|
_base_ = './tood_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# training schedule for 2x
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
| 801 | 24.870968 | 79 |
py
|
ERD
|
ERD-main/configs/tood/tood_r50_fpn_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='TOOD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='TOODHead',
num_classes=80,
in_channels=256,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
train_cfg=dict(
initial_epoch=4,
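        # ATSS assignment is used for the first `initial_epoch` epochs,
        # then training switches to the task-aligned assigner below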
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
| 2,482 | 29.654321 | 79 |
py
|
ERD
|
ERD-main/configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
| 2,213 | 29.328767 | 79 |
py
|
ERD
|
ERD-main/configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_ms-2x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
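# Swin-L (patch4, window12, 384) weights pre-trained on ImageNet-22k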
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=128),
backbone=dict(
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=12,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
        # Please only add indices that would be used
        # in FPN, otherwise some parameters will not be used
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=[
dict(
type='FPN',
in_channels=[384, 768, 1536],
out_channels=256,
start_level=0,
add_extra_convs='on_output',
num_outs=5),
dict(
type='DyHead',
in_channels=256,
out_channels=256,
num_blocks=6,
# disable zero_init_offset to follow official implementation
zero_init_offset=False)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
pred_kernel_size=1, # follow DyHead official implementation
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5), # follow DyHead official implementation
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(2000, 480), (2000, 1200)],
keep_ratio=True,
backend='pillow'),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(2000, 1200), keep_ratio=True, backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=2,
dataset=dict(
type={{_base_.dataset_type}},
data_root={{_base_.data_root}},
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args={{_base_.backend_args}})))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(
type='AdamW', lr=0.00005, betas=(0.9, 0.999), weight_decay=0.05),
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}),
clip_grad=None)
| 4,605 | 31.666667 | 129 |
py
|
ERD
|
ERD-main/configs/dyhead/atss_r50-caffe_fpn_dyhead_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=128),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=[
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
dict(
type='DyHead',
in_channels=256,
out_channels=256,
num_blocks=6,
# disable zero_init_offset to follow official implementation
zero_init_offset=False)
],
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
pred_kernel_size=1, # follow DyHead official implementation
stacked_convs=0,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5), # follow DyHead official implementation
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
| 3,366 | 31.375 | 78 |
py
|
ERD
|
ERD-main/configs/gn+ws/faster-rcnn_x50-32x4d_fpn_gn-ws-all_1x_coco.py
|
_base_ = './faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=50,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws')))
| 544 | 27.684211 | 66 |
py
|
ERD
|
ERD-main/configs/gn+ws/mask-rcnn_x101-32x4d_fpn_gn-ws-all_20-23-24e_coco.py
|
_base_ = './mask-rcnn_x101-32x4d_fpn_gn-ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
| 412 | 21.944444 | 79 |
py
|
ERD
|
ERD-main/configs/gn+ws/mask-rcnn_r101_fpn_gn-ws-all_2x_coco.py
|
_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws')))
| 207 | 28.714286 | 79 |
py
|
ERD
|
ERD-main/configs/gn+ws/mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco.py
|
_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py'
# model settings
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=50,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws')))
| 559 | 27 | 66 |
py
|
ERD
|
ERD-main/configs/gn+ws/mask-rcnn_x101-32x4d_fpn_gn-ws-all_2x_coco.py
|
_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py'
# model settings
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))
| 561 | 27.1 | 67 |
py
|
ERD
|
ERD-main/configs/gn+ws/mask-rcnn_r50_fpn_gn-ws-all_20-23-24e_coco.py
|
_base_ = './mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
| 405 | 21.555556 | 79 |
py
|
ERD
|
ERD-main/configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)))
| 577 | 33 | 78 |
py
|
ERD
|
ERD-main/configs/gn+ws/faster-rcnn_r101_fpn_gn-ws-all_1x_coco.py
|
_base_ = './faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws')))
| 209 | 29 | 79 |
py
|
ERD
|
ERD-main/configs/gn+ws/mask-rcnn_r101_fpn_gn-ws-all_20-23-24e_coco.py
|
_base_ = './mask-rcnn_r101_fpn_gn-ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
| 406 | 21.611111 | 79 |
py
|
ERD
|
ERD-main/configs/gn+ws/mask-rcnn_x50-32x4d_fpn_gn-ws-all_20-23-24e_coco.py
|
_base_ = './mask-rcnn_x50-32x4d_fpn_gn-ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
| 411 | 21.888889 | 79 |
py
|
ERD
|
ERD-main/configs/gn+ws/mask-rcnn_r50_fpn_gn-ws-all_2x_coco.py
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg),
mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
| 988 | 28.088235 | 79 |
py
|
ERD
|
ERD-main/configs/gn+ws/faster-rcnn_x101-32x4d_fpn_gn-ws-all_1x_coco.py
|
_base_ = './faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))
| 546 | 27.789474 | 67 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-rpn_x101-64x4d_fpn_1x_coco.py
|
_base_ = './ga-rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 416 | 26.8 | 76 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py
|
_base_ = '../retinanet/retinanet_r50-caffe_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.4,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
center_ratio=0.2,
ignore_ratio=0.5))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
| 2,032 | 31.790323 | 74 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-rpn_r101-caffe_fpn_1x_coco.py
|
_base_ = './ga-rpn_r50-caffe_fpn_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
| 236 | 25.333333 | 67 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-retinanet_r101-caffe_fpn_1x_coco.py
|
_base_ = './ga-retinanet_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
| 225 | 27.25 | 67 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-retinanet_x101-32x4d_fpn_1x_coco.py
|
_base_ = './ga-retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 422 | 27.2 | 76 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-faster-rcnn_r101-caffe_fpn_1x_coco.py
|
_base_ = './ga-faster-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
| 227 | 27.5 | 67 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-retinanet_x101-64x4d_fpn_1x_coco.py
|
_base_ = './ga-retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 422 | 27.2 | 76 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-rpn_r50-caffe_fpn_1x_coco.py
|
_base_ = '../rpn/rpn_r50-caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5)),
test_cfg=dict(rpn=dict(nms_post=1000)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
| 2,005 | 33.586207 | 74 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-retinanet_r101-caffe_fpn_ms-2x.py
|
_base_ = './ga-retinanet_r101-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
| 869 | 23.857143 | 73 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-faster-rcnn_x101-64x4d_fpn_1x_coco.py
|
_base_ = './ga-faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 424 | 27.333333 | 76 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-faster-rcnn_r50_fpn_1x_coco.py
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5),
rpn_proposal=dict(nms_post=1000, max_per_img=300),
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(type='RandomSampler', num=256))),
test_cfg=dict(
rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
| 2,379 | 35.615385 | 77 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-faster-rcnn_r50-caffe_fpn_1x_coco.py
|
_base_ = '../faster_rcnn/faster-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5),
rpn_proposal=dict(nms_post=1000, max_per_img=300),
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(type='RandomSampler', num=256))),
test_cfg=dict(
rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
| 2,385 | 35.707692 | 77 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-fast-rcnn_r50-caffe_fpn_1x_coco.py
|
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
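# NOTE: the dataset and optimizer settings below still follow the legacy
# MMDetection 2.x config format (img_norm_cfg, data=dict, optimizer_config)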
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img', 'proposals']),
])
]
# TODO: support loading proposals
data = dict(
train=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
| 2,441 | 35.447761 | 78 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-faster-rcnn_x101-32x4d_fpn_1x_coco.py
|
_base_ = './ga-faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 424 | 27.333333 | 76 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-rpn_r50_fpn_1x_coco.py
|
_base_ = '../rpn/rpn_r50_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5)),
test_cfg=dict(rpn=dict(nms_post=1000)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
| 1,999 | 33.482759 | 74 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-retinanet_r50_fpn_1x_coco.py
|
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.4,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
center_ratio=0.2,
ignore_ratio=0.5))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
| 2,026 | 31.693548 | 74 |
py
|
ERD
|
ERD-main/configs/guided_anchoring/ga-rpn_x101-32x4d_fpn_1x_coco.py
|
_base_ = './ga-rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 416 | 26.8 | 76 |
py
|
ERD
|
ERD-main/configs/solov2/solov2-light_r50_fpn_ms-3x_coco.py
|
_base_ = './solov2_r50_fpn_1x_coco.py'
# model settings
model = dict(
mask_head=dict(
stacked_convs=2,
feat_channels=256,
scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)),
mask_feature_head=dict(out_channels=128)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384),
(768, 352)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(448, 768), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
| 1,623 | 27.491228 | 77 |
py
|
ERD
|
ERD-main/configs/solov2/solov2-light_r18_fpn_ms-3x_coco.py
|
_base_ = './solov2-light_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=18, init_cfg=dict(checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
| 218 | 26.375 | 70 |
py
|
ERD
|
ERD-main/configs/solov2/solov2_r101_fpn_ms-3x_coco.py
|
_base_ = './solov2_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101, init_cfg=dict(checkpoint='torchvision://resnet101')))
| 166 | 22.857143 | 72 |
py
|
ERD
|
ERD-main/configs/solov2/solov2-light_r50-dcn_fpn_ms-3x_coco.py
|
_base_ = './solov2-light_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
mask_head=dict(
feat_channels=256,
stacked_convs=3,
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
mask_feature_head=dict(out_channels=128),
dcn_cfg=dict(type='DCNv2'),
dcn_apply_to_all_conv=False)) # light solov2 head
| 525 | 34.066667 | 78 |
py
|
ERD
|
ERD-main/configs/solov2/solov2_r101-dcn_fpn_ms-3x_coco.py
|
_base_ = './solov2_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(checkpoint='torchvision://resnet101'),
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
mask_head=dict(
mask_feature_head=dict(conv_cfg=dict(type='DCNv2')),
dcn_cfg=dict(type='DCNv2'),
dcn_apply_to_all_conv=True))
| 457 | 31.714286 | 78 |
py
|
ERD
|
ERD-main/configs/solov2/solov2_x101-dcn_fpn_ms-3x_coco.py
|
_base_ = './solov2_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')),
mask_head=dict(
mask_feature_head=dict(conv_cfg=dict(type='DCNv2')),
dcn_cfg=dict(type='DCNv2'),
dcn_apply_to_all_conv=True))
| 560 | 30.166667 | 78 |
py
|
ERD
|
ERD-main/configs/solov2/solov2_r50_fpn_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='SOLOv2',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
mask_head=dict(
type='SOLOV2Head',
num_classes=80,
in_channels=256,
feat_channels=512,
stacked_convs=4,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
mask_feature_head=dict(
feat_channels=128,
start_level=0,
end_level=3,
out_channels=256,
mask_stride=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0)),
# model training and testing settings
test_cfg=dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
filter_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01), clip_grad=dict(max_norm=35, norm_type=2))
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator
| 2,046 | 27.830986 | 78 |
py
|
ERD
|
ERD-main/configs/solov2/solov2_r50_fpn_ms-3x_coco.py
|
_base_ = './solov2_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
| 905 | 24.166667 | 73 |
py
|
ERD
|
ERD-main/configs/solov2/solov2-light_r34_fpn_ms-3x_coco.py
|
_base_ = './solov2-light_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=34, init_cfg=dict(checkpoint='torchvision://resnet34')),
neck=dict(in_channels=[64, 128, 256, 512]))
| 218 | 26.375 | 70 |
py
|
ERD
|
ERD-main/configs/dab_detr/dab-detr_r50_8xb2-50e_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
type='DABDETR',
num_queries=300,
with_random_refpoints=False,
num_patterns=0,
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=1),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='ChannelMapper',
in_channels=[2048],
kernel_size=1,
out_channels=256,
act_cfg=None,
norm_cfg=None,
num_outs=1),
encoder=dict(
num_layers=6,
layer_cfg=dict(
self_attn_cfg=dict(
embed_dims=256, num_heads=8, dropout=0., batch_first=True),
ffn_cfg=dict(
embed_dims=256,
feedforward_channels=2048,
num_fcs=2,
ffn_drop=0.,
act_cfg=dict(type='PReLU')))),
decoder=dict(
num_layers=6,
query_dim=4,
query_scale_type='cond_elewise',
with_modulated_hw_attn=True,
layer_cfg=dict(
self_attn_cfg=dict(
embed_dims=256,
num_heads=8,
attn_drop=0.,
proj_drop=0.,
cross_attn=False),
cross_attn_cfg=dict(
embed_dims=256,
num_heads=8,
attn_drop=0.,
proj_drop=0.,
cross_attn=True),
ffn_cfg=dict(
embed_dims=256,
feedforward_channels=2048,
num_fcs=2,
ffn_drop=0.,
act_cfg=dict(type='PReLU'))),
return_intermediate=True),
positional_encoding=dict(num_feats=128, temperature=20, normalize=True),
bbox_head=dict(
type='DABDETRHead',
num_classes=80,
embed_dims=256,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='HungarianAssigner',
match_costs=[
dict(type='FocalLossCost', weight=2., eps=1e-8),
dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
dict(type='IoUCost', iou_mode='giou', weight=2.0)
])),
test_cfg=dict(max_per_img=300))
# train_pipeline, NOTE: the img_scale and the Pad's size_divisor are different
# from the default setting in mmdet.
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(
custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))
# learning policy
max_epochs = 50
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[40],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16, enable=False)
| 5,406 | 32.79375 | 79 |
py
|
ERD
|
ERD-main/configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
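# ConvNeXt-T weights pre-trained on ImageNet-1k, loaded into the backbone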
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
| 3,397 | 34.030928 | 162 |
py
|
ERD
|
ERD-main/configs/convnext/cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py
|
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]),
roi_head=dict(bbox_head=[
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
dict(
type='ConvFCBBoxHead',
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=False,
reg_decoded_bbox=True,
norm_cfg=dict(type='SyncBN', requires_grad=True),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0002,
betas=(0.9, 0.999),
weight_decay=0.05))
| 5,665 | 35.554839 | 162 |
py
|
ERD
|
ERD-main/configs/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py
|
_base_ = './cascade-mask-rcnn_convnext-t-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py' # noqa
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='small',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')))
optim_wrapper = dict(paramwise_cfg={
'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12
})
| 965 | 34.777778 | 163 |
py
|
ERD
|
ERD-main/configs/lad/lad_r101-paa-r50_fpn_2xb8_coco_1x.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
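# PAA R-50 teacher checkpoint; the student below uses a ResNet-101 backbone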
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
| 3,956 | 29.914063 | 138 |
py
|
ERD
|
ERD-main/configs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
| 3,934 | 29.984252 | 139 |
py
|
ERD
|
ERD-main/configs/sparse_rcnn/sparse-rcnn_r101_fpn_ms-480-800-3x_coco.py
|
_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 211 | 25.5 | 61 |
py
|
ERD
|
ERD-main/configs/sparse_rcnn/sparse-rcnn_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py
|
_base_ = './sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 230 | 27.875 | 73 |
py
|
ERD
|
ERD-main/configs/sparse_rcnn/sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py
|
_base_ = './sparse-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 36
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
| 953 | 27.909091 | 79 |
py
|
ERD
|
ERD-main/configs/sparse_rcnn/sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py
|
_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
| 1,845 | 40.954545 | 75 |
py
|
ERD
|
ERD-main/configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
num_stages = 6
num_proposals = 100
model = dict(
type='SparseRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=4),
rpn_head=dict(
type='EmbeddingRPNHead',
num_proposals=num_proposals,
proposal_feature_channel=256),
roi_head=dict(
type='SparseRoIHead',
num_stages=num_stages,
stage_loss_weights=[1] * num_stages,
proposal_feature_channel=256,
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='DIIHead',
num_classes=80,
num_ffn_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
in_channels=256,
dropout=0.0,
ffn_act_cfg=dict(type='ReLU', inplace=True),
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=7,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=2.0),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=False,
target_means=[0., 0., 0., 0.],
target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages)
]),
# training and testing settings
train_cfg=dict(
rpn=None,
rcnn=[
dict(
assigner=dict(
type='HungarianAssigner',
match_costs=[
dict(type='FocalLossCost', weight=2.0),
dict(type='BBoxL1Cost', weight=5.0, box_format='xyxy'),
dict(type='IoUCost', iou_mode='giou', weight=2.0)
]),
sampler=dict(type='PseudoSampler'),
pos_weight=1) for _ in range(num_stages)
]),
test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals)))
# optimizer
optim_wrapper = dict(
optimizer=dict(
_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001),
clip_grad=dict(max_norm=1, norm_type=2))
| 3,572 | 34.029412 | 79 |
py
|
ERD
|
ERD-main/configs/cityscapes/mask-rcnn_r50_fpn_1x_cityscapes.py
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
num_classes=8,
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_head=dict(num_classes=8)))
# optimizer
# lr is set for a batch size of 8
optim_wrapper = dict(optimizer=dict(lr=0.01))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=8,
by_epoch=True,
# [7] yields higher performance than [6]
milestones=[7],
gamma=0.1)
]
# actual epoch = 8 * 8 = 64
train_cfg = dict(max_epochs=8)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
# TODO: support auto scaling lr
# auto_scale_lr = dict(base_batch_size=8)
| 1,354 | 29.795455 | 153 |
py
|
ERD
|
ERD-main/configs/cityscapes/faster-rcnn_r50_fpn_1x_cityscapes.py
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
num_classes=8,
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optim_wrapper = dict(optimizer=dict(lr=0.01))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=8,
by_epoch=True,
# [7] yields higher performance than [6]
milestones=[7],
gamma=0.1)
]
# actual epoch = 8 * 8 = 64
train_cfg = dict(max_epochs=8)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
# TODO: support auto scaling lr
# auto_scale_lr = dict(base_batch_size=8)
| 1,286 | 29.642857 | 159 |
py
|
ERD
|
ERD-main/configs/deepfashion/mask-rcnn_r50_fpn_15e_deepfashion.py
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15)))
# runtime settings
max_epochs = 15
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
| 663 | 26.666667 | 79 |
py
|
ERD
|
ERD-main/configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_gn')),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
| 1,003 | 26.135135 | 79 |
py
|
ERD
|
ERD-main/configs/gn/mask-rcnn_r50-contrib_fpn_gn-all_3x_coco.py
|
_base_ = './mask-rcnn_r50-contrib_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
| 411 | 20.684211 | 79 |
py
|
ERD
|
ERD-main/configs/gn/mask-rcnn_r50-contrib_fpn_gn-all_2x_coco.py
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
| 863 | 26 | 79 |
py
|
ERD
|
ERD-main/configs/gn/mask-rcnn_r50_fpn_gn-all_3x_coco.py
|
_base_ = './mask-rcnn_r50_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
| 403 | 20.263158 | 79 |
py
|
ERD
|
ERD-main/configs/gn/mask-rcnn_r101_fpn_gn-all_2x_coco.py
|
_base_ = './mask-rcnn_r50_fpn_gn-all_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_gn')))
| 219 | 26.5 | 63 |
py
|
ERD
|
ERD-main/configs/gn/mask-rcnn_r101_fpn_gn-all_3x_coco.py
|
_base_ = './mask-rcnn_r101_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
| 404 | 20.315789 | 79 |
py
|
ERD
|
ERD-main/docs/en/stat.py
|
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/main/configs'
files = sorted(glob.glob('../../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('../../configs', url_prefix))
with open(f, 'r') as content_file:
content = content_file.read()
title = content.split('\n')[0].replace('# ', '').strip()
ckpts = set(x.lower().strip()
for x in re.findall(r'\[model\]\((https?.*)\)', content))
if len(ckpts) == 0:
continue
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
assert len(_papertype) > 0
papertype = _papertype[0]
paper = set([(papertype, title)])
titles.append(title)
num_ckpts += len(ckpts)
statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
stats.append((paper, ckpts, statsmsg))
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)
papertypes, papercounts = np.unique([t for t, _ in allpapers],
return_counts=True)
countstr = '\n'.join(
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
modelzoo = f"""
# Model Zoo Statistics
* Number of papers: {len(set(titles))}
{countstr}
* Number of checkpoints: {num_ckpts}
{msglist}
"""
with open('modelzoo_statistics.md', 'w') as f:
f.write(modelzoo)
| 1,537 | 22.661538 | 74 |
py
|
ERD
|
ERD-main/docs/en/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The main toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'en'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
| 3,435 | 28.367521 | 79 |
py
|
ERD
|
ERD-main/docs/zh_cn/stat.py
|
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/main/'
files = sorted(glob.glob('../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('../', url_prefix))
with open(f, 'r') as content_file:
content = content_file.read()
title = content.split('\n')[0].replace('# ', '').strip()
ckpts = set(x.lower().strip()
for x in re.findall(r'\[model\]\((https?.*)\)', content))
if len(ckpts) == 0:
continue
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
assert len(_papertype) > 0
papertype = _papertype[0]
paper = set([(papertype, title)])
titles.append(title)
num_ckpts += len(ckpts)
statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
stats.append((paper, ckpts, statsmsg))
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)
papertypes, papercounts = np.unique([t for t, _ in allpapers],
return_counts=True)
countstr = '\n'.join(
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
modelzoo = f"""
# Model Zoo Statistics
* Number of papers: {len(set(titles))}
{countstr}
* Number of checkpoints: {num_ckpts}
{msglist}
"""
with open('modelzoo_statistics.md', 'w') as f:
f.write(modelzoo)
| 1,517 | 22.353846 | 74 |
py
|
ERD
|
ERD-main/docs/zh_cn/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The main toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
| 3,459 | 28.07563 | 79 |
py
|
ERD
|
ERD-main/data_process/select_cats.py
|
import argparse
import json
import time
import os.path as osp
def arg_parse():
parser = argparse.ArgumentParser(description='COCO Dataset Loader')
parser.add_argument('--dataset', default='COCO', help='dataset type')
parser.add_argument('--data_path',
default='/home/bl/Documents/Projects/Space/detections/datasets_ssd/coco_root/annotations',
help='annotation path')
parser.add_argument('--anno_file', default='instances_train2017', help='annotation file without suffix')
# parser.add_argument('--anno_file', default='instances_val2017', help='annotation file without suffix')
args = parser.parse_args()
return args
def main():
args = arg_parse()
anno_file = osp.join(args.data_path, args.anno_file)
print('loading annotations into memory ...')
tic = time.time()
dataset = json.load(open(anno_file + '.json', 'r'))
assert type(dataset) == dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time() - tic))
# sort by cat_ids
dataset['categories'] = sorted(dataset['categories'], key=lambda k: k['id'])
# ========================================>
# select specific cat_ids
sel_num_0, sel_num_1 = 40, 80
    filename_suffix = '_last_40_cats'
    # <========================================
    sel_cats = dataset['categories'][sel_num_0:sel_num_1]  # select cats sel_num_0..sel_num_1 (here the last 40)
# select specific annotations
sel_cats_ids = [cat['id'] for cat in sel_cats]
sel_anno = []
sel_images_ids = []
for anno in dataset['annotations']:
if anno['category_id'] in sel_cats_ids:
sel_anno.append(anno)
sel_images_ids.append(anno['image_id'])
sel_images_ids = set(sel_images_ids)
sel_images = []
for img_ in dataset['images']:
if img_['id'] in sel_images_ids:
sel_images.append(img_)
# selected dataset dict
sel_dataset = dict()
sel_dataset['categories'] = sel_cats
sel_dataset['annotations'] = sel_anno
sel_dataset['images'] = sel_images
fp = open(anno_file + filename_suffix + '.json', 'w')
json.dump(sel_dataset, fp)
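# Hedged helper sketch (added for illustration; not part of the original
# script). It double-checks that a filtered annotation file produced by
# main() only references the selected categories and images. The path passed
# in is a hypothetical example such as 'instances_train2017_last_40_cats.json'.
def check_selected_cats(filtered_anno_path):
    with open(filtered_anno_path, 'r') as f:
        data = json.load(f)
    cat_ids = {cat['id'] for cat in data['categories']}
    img_ids = {img_['id'] for img_ in data['images']}
    # every annotation must point to one of the kept categories and images
    assert all(anno['category_id'] in cat_ids for anno in data['annotations'])
    assert all(anno['image_id'] in img_ids for anno in data['annotations'])
    print('checked {} annotations over {} categories'.format(
        len(data['annotations']), len(cat_ids)))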
if __name__ == '__main__':
main()
| 2,243 | 31.521739 | 114 |
py
|
ERD
|
ERD-main/mmdet/registry.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMDetection provides 17 registry nodes to support using modules across
projects. Each node is a child of the root registry in MMEngine.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS
from mmengine.registry import DATASETS as MMENGINE_DATASETS
from mmengine.registry import EVALUATOR as MMENGINE_EVALUATOR
from mmengine.registry import HOOKS as MMENGINE_HOOKS
from mmengine.registry import LOG_PROCESSORS as MMENGINE_LOG_PROCESSORS
from mmengine.registry import LOOPS as MMENGINE_LOOPS
from mmengine.registry import METRICS as MMENGINE_METRICS
from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS
from mmengine.registry import MODELS as MMENGINE_MODELS
from mmengine.registry import \
OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS
from mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS
from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS
from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS
from mmengine.registry import \
RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS
from mmengine.registry import RUNNERS as MMENGINE_RUNNERS
from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS
from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS
from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS
from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS
from mmengine.registry import \
WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS
from mmengine.registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry(
'runner', parent=MMENGINE_RUNNERS, locations=['mmdet.engine.runner'])
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry(
'runner constructor',
parent=MMENGINE_RUNNER_CONSTRUCTORS,
locations=['mmdet.engine.runner'])
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry(
'loop', parent=MMENGINE_LOOPS, locations=['mmdet.engine.runner'])
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry(
'hook', parent=MMENGINE_HOOKS, locations=['mmdet.engine.hooks'])
# manage data-related modules
DATASETS = Registry(
'dataset', parent=MMENGINE_DATASETS, locations=['mmdet.datasets'])
DATA_SAMPLERS = Registry(
'data sampler',
parent=MMENGINE_DATA_SAMPLERS,
locations=['mmdet.datasets.samplers'])
TRANSFORMS = Registry(
'transform',
parent=MMENGINE_TRANSFORMS,
locations=['mmdet.datasets.transforms'])
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry(
'model_wrapper',
parent=MMENGINE_MODEL_WRAPPERS,
locations=['mmdet.models'])
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry(
'weight initializer',
parent=MMENGINE_WEIGHT_INITIALIZERS,
locations=['mmdet.models'])
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry(
'optimizer',
parent=MMENGINE_OPTIMIZERS,
locations=['mmdet.engine.optimizers'])
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry(
'optim_wrapper',
parent=MMENGINE_OPTIM_WRAPPERS,
locations=['mmdet.engine.optimizers'])
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry(
'optimizer constructor',
parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS,
locations=['mmdet.engine.optimizers'])
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler',
parent=MMENGINE_PARAM_SCHEDULERS,
locations=['mmdet.engine.schedulers'])
# manage all kinds of metrics
METRICS = Registry(
'metric', parent=MMENGINE_METRICS, locations=['mmdet.evaluation'])
# manage evaluator
EVALUATOR = Registry(
'evaluator', parent=MMENGINE_EVALUATOR, locations=['mmdet.evaluation'])
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry(
'task util', parent=MMENGINE_TASK_UTILS, locations=['mmdet.models'])
# manage visualizer
VISUALIZERS = Registry(
'visualizer',
parent=MMENGINE_VISUALIZERS,
locations=['mmdet.visualization'])
# manage visualizer backend
VISBACKENDS = Registry(
'vis_backend',
parent=MMENGINE_VISBACKENDS,
locations=['mmdet.visualization'])
# manage logprocessor
LOG_PROCESSORS = Registry(
'log_processor',
parent=MMENGINE_LOG_PROCESSORS,
# TODO: update the location when mmdet has its own log processor
locations=['mmdet.engine'])
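# Hedged usage sketch (added for illustration; not part of the original
# module). Because each registry above is a child of its MMEngine root
# registry, downstream code can register a component and then build it from
# a config dict. `ToyHook` is a hypothetical name used only in this demo.
if __name__ == '__main__':
    from mmengine.hooks import Hook
    @HOOKS.register_module()
    class ToyHook(Hook):
        """Minimal hook used only to demonstrate registration."""
        def before_train_epoch(self, runner) -> None:
            print('ToyHook.before_train_epoch called')
    # build the hook from a config dict, exactly as a runner would
    toy_hook = HOOKS.build(dict(type='ToyHook'))
    print(type(toy_hook).__name__)  # -> ToyHook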
| 4,875 | 38.967213 | 78 |
py
|
ERD
|
ERD-main/mmdet/version.py
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
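# Hedged sanity sketch (added for illustration; not part of the original
# module): running this file directly checks the parsing behaviour described
# in the docstring above.
if __name__ == '__main__':
    assert parse_version_info('1.3.0') == (1, 3, 0)
    assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')
    print(__version__, version_info)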
| 795 | 27.428571 | 72 |
py
|
ERD
|
ERD-main/mmdet/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.7.1'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
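# Hedged sanity sketch (added for illustration; not part of the original
# module): digit_version turns version strings into comparable tuples, which
# is what the range checks above rely on (pre-releases sort before releases).
if __name__ == '__main__':
    assert digit_version(mmcv_minimum_version) < digit_version(mmcv_maximum_version)
    assert digit_version('2.0.0rc4') < digit_version('2.0.0')
    print('mmcv', mmcv.__version__, 'mmengine', mmengine.__version__)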
| 1,042 | 36.25 | 76 |
py
|
ERD
|
ERD-main/mmdet/apis/inference.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from pathlib import Path
from typing import Optional, Sequence, Union
import numpy as np
import torch
import torch.nn as nn
from mmcv.ops import RoIPool
from mmcv.transforms import Compose
from mmengine.config import Config
from mmengine.model.utils import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner import load_checkpoint
from mmdet.registry import DATASETS
from ..evaluation import get_classes
from ..registry import MODELS
from ..structures import DetDataSample, SampleList
from ..utils import get_test_pipeline_cfg
def init_detector(
config: Union[str, Path, Config],
checkpoint: Optional[str] = None,
palette: str = 'none',
device: str = 'cuda:0',
cfg_options: Optional[dict] = None,
) -> nn.Module:
"""Initialize a detector from config file.
Args:
config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path,
:obj:`Path`, or the config object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
palette (str): Color palette used for visualization. If palette
is stored in checkpoint, use checkpoint's palette first, otherwise
use externally passed palette. Currently, supports 'coco', 'voc',
'citys' and 'random'. Defaults to none.
device (str): The device where the anchors will be put on.
Defaults to cuda:0.
cfg_options (dict, optional): Options to override some settings in
the used config.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, (str, Path)):
config = Config.fromfile(config)
elif not isinstance(config, Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if cfg_options is not None:
config.merge_from_dict(cfg_options)
elif 'init_cfg' in config.model.backbone:
config.model.backbone.init_cfg = None
init_default_scope(config.get('default_scope', 'mmdet'))
model = MODELS.build(config.model)
model = revert_sync_batchnorm(model)
if checkpoint is None:
warnings.simplefilter('once')
warnings.warn('checkpoint is None, use COCO classes by default.')
model.dataset_meta = {'classes': get_classes('coco')}
else:
checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
# Weights converted from elsewhere may not have meta fields.
checkpoint_meta = checkpoint.get('meta', {})
# save the dataset_meta in the model for convenience
if 'dataset_meta' in checkpoint_meta:
# mmdet 3.x, all keys should be lowercase
model.dataset_meta = {
k.lower(): v
for k, v in checkpoint_meta['dataset_meta'].items()
}
elif 'CLASSES' in checkpoint_meta:
# < mmdet 3.x
classes = checkpoint_meta['CLASSES']
model.dataset_meta = {'classes': classes}
else:
warnings.simplefilter('once')
warnings.warn(
'dataset_meta or class names are not saved in the '
'checkpoint\'s meta data, use COCO classes by default.')
model.dataset_meta = {'classes': get_classes('coco')}
# Priority: args.palette -> config -> checkpoint
if palette != 'none':
model.dataset_meta['palette'] = palette
else:
test_dataset_cfg = copy.deepcopy(config.test_dataloader.dataset)
# lazy init. We only need the metainfo.
test_dataset_cfg['lazy_init'] = True
metainfo = DATASETS.build(test_dataset_cfg).metainfo
cfg_palette = metainfo.get('palette', None)
if cfg_palette is not None:
model.dataset_meta['palette'] = cfg_palette
else:
if 'palette' not in model.dataset_meta:
warnings.warn(
'palette does not exist, random is used by default. '
'You can also set the palette to customize.')
model.dataset_meta['palette'] = 'random'
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
ImagesType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]]
def inference_detector(
model: nn.Module,
imgs: ImagesType,
test_pipeline: Optional[Compose] = None
) -> Union[DetDataSample, SampleList]:
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str, ndarray, Sequence[str/ndarray]):
Either image files or loaded images.
test_pipeline (:obj:`Compose`): Test pipeline.
Returns:
:obj:`DetDataSample` or list[:obj:`DetDataSample`]:
If imgs is a list or tuple, the same length list type results
will be returned, otherwise return the detection results directly.
"""
if isinstance(imgs, (list, tuple)):
is_batch = True
else:
imgs = [imgs]
is_batch = False
cfg = model.cfg
if test_pipeline is None:
cfg = cfg.copy()
test_pipeline = get_test_pipeline_cfg(cfg)
if isinstance(imgs[0], np.ndarray):
            # Calling this method across libraries will result in a
            # "module not registered" error if the type is not prefixed
            # with 'mmdet.'.
test_pipeline[0].type = 'mmdet.LoadImageFromNDArray'
test_pipeline = Compose(test_pipeline)
if model.data_preprocessor.device.type == 'cpu':
for m in model.modules():
assert not isinstance(
m, RoIPool
), 'CPU inference with RoIPool is not supported currently.'
result_list = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# TODO: remove img_id.
data_ = dict(img=img, img_id=0)
else:
# TODO: remove img_id.
data_ = dict(img_path=img, img_id=0)
# build the data pipeline
data_ = test_pipeline(data_)
data_['inputs'] = [data_['inputs']]
data_['data_samples'] = [data_['data_samples']]
# forward the model
with torch.no_grad():
results = model.test_step(data_)[0]
result_list.append(results)
if not is_batch:
return result_list[0]
else:
return result_list
# TODO: Awaiting refactoring
async def async_inference_detector(model, imgs):
"""Async inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
img (str | ndarray): Either image files or loaded images.
Returns:
Awaitable detection results.
"""
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
cfg = model.cfg
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromNDArray'
# cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
datas.append(data)
for m in model.modules():
assert not isinstance(
m,
RoIPool), 'CPU inference with RoIPool is not supported currently.'
# We don't restore `torch.is_grad_enabled()` value during concurrent
# inference since execution can overlap
torch.set_grad_enabled(False)
results = await model.aforward_test(data, rescale=True)
return results
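# Hedged usage sketch (added for illustration; not part of the original
# module). The config, checkpoint and image paths are hypothetical
# placeholders; substitute files that exist in your environment.
if __name__ == '__main__':
    detector = init_detector(
        'configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',  # hypothetical
        checkpoint=None,  # pass a real .pth file for meaningful predictions
        device='cpu')
    det_result = inference_detector(detector, 'demo/demo.jpg')  # hypothetical
    print(det_result.pred_instances.scores.shape)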
| 8,007 | 33.222222 | 79 |
py
|
ERD
|
ERD-main/mmdet/apis/det_inferencer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import warnings
from typing import Dict, Iterable, List, Optional, Sequence, Union
import mmcv
import mmengine
import numpy as np
import torch.nn as nn
from mmengine.dataset import Compose
from mmengine.fileio import (get_file_backend, isdir, join_path,
list_dir_or_file)
from mmengine.infer.infer import BaseInferencer, ModelType
from mmengine.model.utils import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner.checkpoint import _load_checkpoint_to_model
from mmengine.visualization import Visualizer
from rich.progress import track
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.registry import DATASETS
from mmdet.structures import DetDataSample
from mmdet.structures.mask import encode_mask_results, mask2bbox
from mmdet.utils import ConfigType
from ..evaluation import get_classes
try:
from panopticapi.evaluation import VOID
from panopticapi.utils import id2rgb
except ImportError:
id2rgb = None
VOID = None
InputType = Union[str, np.ndarray]
InputsType = Union[InputType, Sequence[InputType]]
PredType = List[DetDataSample]
ImgType = Union[np.ndarray, Sequence[np.ndarray]]
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
'.tiff', '.webp')
class DetInferencer(BaseInferencer):
"""Object Detection Inferencer.
Args:
model (str, optional): Path to the config file or the model name
defined in metafile. For example, it could be
"rtmdet-s" or 'rtmdet_s_8xb32-300e_coco' or
"configs/rtmdet/rtmdet_s_8xb32-300e_coco.py".
If model is not specified, user must provide the
`weights` saved by MMEngine which contains the config string.
Defaults to None.
weights (str, optional): Path to the checkpoint. If it is not specified
and model is a model name of metafile, the weights will be loaded
from metafile. Defaults to None.
device (str, optional): Device to run inference. If None, the available
device will be automatically used. Defaults to None.
scope (str, optional): The scope of the model. Defaults to mmdet.
palette (str): Color palette used for visualization. The order of
priority is palette -> config -> checkpoint. Defaults to 'none'.
"""
preprocess_kwargs: set = set()
forward_kwargs: set = set()
visualize_kwargs: set = {
'return_vis',
'show',
'wait_time',
'draw_pred',
'pred_score_thr',
'img_out_dir',
'no_save_vis',
}
postprocess_kwargs: set = {
'print_result',
'pred_out_dir',
'return_datasample',
'no_save_pred',
}
def __init__(self,
model: Optional[Union[ModelType, str]] = None,
weights: Optional[str] = None,
device: Optional[str] = None,
scope: Optional[str] = 'mmdet',
palette: str = 'none') -> None:
# A global counter tracking the number of images processed, for
# naming of the output images
self.num_visualized_imgs = 0
self.num_predicted_imgs = 0
self.palette = palette
init_default_scope(scope)
super().__init__(
model=model, weights=weights, device=device, scope=scope)
self.model = revert_sync_batchnorm(self.model)
def _load_weights_to_model(self, model: nn.Module,
checkpoint: Optional[dict],
cfg: Optional[ConfigType]) -> None:
"""Loading model weights and meta information from cfg and checkpoint.
Args:
model (nn.Module): Model to load weights and meta information.
checkpoint (dict, optional): The loaded checkpoint.
cfg (Config or ConfigDict, optional): The loaded config.
"""
if checkpoint is not None:
_load_checkpoint_to_model(model, checkpoint)
checkpoint_meta = checkpoint.get('meta', {})
# save the dataset_meta in the model for convenience
if 'dataset_meta' in checkpoint_meta:
# mmdet 3.x, all keys should be lowercase
model.dataset_meta = {
k.lower(): v
for k, v in checkpoint_meta['dataset_meta'].items()
}
elif 'CLASSES' in checkpoint_meta:
# < mmdet 3.x
classes = checkpoint_meta['CLASSES']
model.dataset_meta = {'classes': classes}
else:
warnings.warn(
'dataset_meta or class names are not saved in the '
'checkpoint\'s meta data, use COCO classes by default.')
model.dataset_meta = {'classes': get_classes('coco')}
else:
warnings.warn('Checkpoint is not loaded, and the inference '
'result is calculated by the randomly initialized '
'model!')
warnings.warn('weights is None, use COCO classes by default.')
model.dataset_meta = {'classes': get_classes('coco')}
# Priority: args.palette -> config -> checkpoint
if self.palette != 'none':
model.dataset_meta['palette'] = self.palette
else:
test_dataset_cfg = copy.deepcopy(cfg.test_dataloader.dataset)
# lazy init. We only need the metainfo.
test_dataset_cfg['lazy_init'] = True
metainfo = DATASETS.build(test_dataset_cfg).metainfo
cfg_palette = metainfo.get('palette', None)
if cfg_palette is not None:
model.dataset_meta['palette'] = cfg_palette
else:
if 'palette' not in model.dataset_meta:
warnings.warn(
'palette does not exist, random is used by default. '
'You can also set the palette to customize.')
model.dataset_meta['palette'] = 'random'
def _init_pipeline(self, cfg: ConfigType) -> Compose:
"""Initialize the test pipeline."""
pipeline_cfg = cfg.test_dataloader.dataset.pipeline
# For inference, the key of ``img_id`` is not used.
if 'meta_keys' in pipeline_cfg[-1]:
pipeline_cfg[-1]['meta_keys'] = tuple(
meta_key for meta_key in pipeline_cfg[-1]['meta_keys']
if meta_key != 'img_id')
load_img_idx = self._get_transform_idx(pipeline_cfg,
'LoadImageFromFile')
if load_img_idx == -1:
raise ValueError(
'LoadImageFromFile is not found in the test pipeline')
pipeline_cfg[load_img_idx]['type'] = 'mmdet.InferencerLoader'
return Compose(pipeline_cfg)
def _get_transform_idx(self, pipeline_cfg: ConfigType, name: str) -> int:
"""Returns the index of the transform in a pipeline.
If the transform is not found, returns -1.
"""
for i, transform in enumerate(pipeline_cfg):
if transform['type'] == name:
return i
return -1
def _init_visualizer(self, cfg: ConfigType) -> Optional[Visualizer]:
"""Initialize visualizers.
Args:
cfg (ConfigType): Config containing the visualizer information.
Returns:
Visualizer or None: Visualizer initialized with config.
"""
visualizer = super()._init_visualizer(cfg)
visualizer.dataset_meta = self.model.dataset_meta
return visualizer
def _inputs_to_list(self, inputs: InputsType) -> list:
"""Preprocess the inputs to a list.
Preprocess inputs to a list according to its type:
- list or tuple: return inputs
- str:
- Directory path: return all files in the directory
- other cases: return a list containing the string. The string
could be a path to file, a url or other types of string according
to the task.
Args:
inputs (InputsType): Inputs for the inferencer.
Returns:
list: List of input for the :meth:`preprocess`.
"""
if isinstance(inputs, str):
backend = get_file_backend(inputs)
if hasattr(backend, 'isdir') and isdir(inputs):
# Backends like HttpsBackend do not implement `isdir`, so only
# those backends that implement `isdir` could accept the inputs
# as a directory
filename_list = list_dir_or_file(
inputs, list_dir=False, suffix=IMG_EXTENSIONS)
inputs = [
join_path(inputs, filename) for filename in filename_list
]
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
return list(inputs)
def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs):
"""Process the inputs into a model-feedable format.
Customize your preprocess by overriding this method. Preprocess should
return an iterable object, of which each item will be used as the
input of ``model.test_step``.
``BaseInferencer.preprocess`` will return an iterable chunked data,
which will be used in __call__ like this:
.. code-block:: python
def __call__(self, inputs, batch_size=1, **kwargs):
chunked_data = self.preprocess(inputs, batch_size, **kwargs)
for batch in chunked_data:
preds = self.forward(batch, **kwargs)
Args:
inputs (InputsType): Inputs given by user.
batch_size (int): batch size. Defaults to 1.
Yields:
Any: Data processed by the ``pipeline`` and ``collate_fn``.
"""
chunked_data = self._get_chunk_data(inputs, batch_size)
yield from map(self.collate_fn, chunked_data)
def _get_chunk_data(self, inputs: Iterable, chunk_size: int):
"""Get batch data from inputs.
Args:
inputs (Iterable): An iterable dataset.
chunk_size (int): Equivalent to batch size.
Yields:
list: batch data.
"""
inputs_iter = iter(inputs)
while True:
try:
chunk_data = []
for _ in range(chunk_size):
inputs_ = next(inputs_iter)
chunk_data.append((inputs_, self.pipeline(inputs_)))
yield chunk_data
except StopIteration:
if chunk_data:
yield chunk_data
break
    # TODO: Video and webcam inputs are currently not supported. Inference
    # may also consume too much memory if the input folder contains many
    # images; this will be optimized later.
def __call__(self,
inputs: InputsType,
batch_size: int = 1,
return_vis: bool = False,
show: bool = False,
wait_time: int = 0,
no_save_vis: bool = False,
draw_pred: bool = True,
pred_score_thr: float = 0.3,
return_datasample: bool = False,
print_result: bool = False,
no_save_pred: bool = True,
out_dir: str = '',
**kwargs) -> dict:
"""Call the inferencer.
Args:
inputs (InputsType): Inputs for the inferencer.
batch_size (int): Inference batch size. Defaults to 1.
show (bool): Whether to display the visualization results in a
popup window. Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
no_save_vis (bool): Whether to force not to save prediction
vis results. Defaults to False.
draw_pred (bool): Whether to draw predicted bounding boxes.
Defaults to True.
pred_score_thr (float): Minimum score of bboxes to draw.
Defaults to 0.3.
return_datasample (bool): Whether to return results as
:obj:`DetDataSample`. Defaults to False.
print_result (bool): Whether to print the inference result w/o
visualization to the console. Defaults to False.
no_save_pred (bool): Whether to force not to save prediction
results. Defaults to True.
            out_dir (str): Directory to save the inference results or
                visualization. If left as empty, no file will be saved.
                Defaults to ''.
**kwargs: Other keyword arguments passed to :meth:`preprocess`,
:meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
Each key in kwargs should be in the corresponding set of
``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
and ``postprocess_kwargs``.
Returns:
dict: Inference and visualization results.
"""
(
preprocess_kwargs,
forward_kwargs,
visualize_kwargs,
postprocess_kwargs,
) = self._dispatch_kwargs(**kwargs)
ori_inputs = self._inputs_to_list(inputs)
inputs = self.preprocess(
ori_inputs, batch_size=batch_size, **preprocess_kwargs)
results_dict = {'predictions': [], 'visualization': []}
for ori_inputs, data in track(inputs, description='Inference'):
preds = self.forward(data, **forward_kwargs)
visualization = self.visualize(
ori_inputs,
preds,
return_vis=return_vis,
show=show,
wait_time=wait_time,
draw_pred=draw_pred,
pred_score_thr=pred_score_thr,
no_save_vis=no_save_vis,
img_out_dir=out_dir,
**visualize_kwargs)
results = self.postprocess(
preds,
visualization,
return_datasample=return_datasample,
print_result=print_result,
no_save_pred=no_save_pred,
pred_out_dir=out_dir,
**postprocess_kwargs)
results_dict['predictions'].extend(results['predictions'])
if results['visualization'] is not None:
results_dict['visualization'].extend(results['visualization'])
return results_dict
def visualize(self,
inputs: InputsType,
preds: PredType,
return_vis: bool = False,
show: bool = False,
wait_time: int = 0,
draw_pred: bool = True,
pred_score_thr: float = 0.3,
no_save_vis: bool = False,
img_out_dir: str = '',
**kwargs) -> Union[List[np.ndarray], None]:
"""Visualize predictions.
Args:
inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
preds (List[:obj:`DetDataSample`]): Predictions of the model.
return_vis (bool): Whether to return the visualization result.
Defaults to False.
show (bool): Whether to display the image in a popup window.
Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw predicted bounding boxes.
Defaults to True.
pred_score_thr (float): Minimum score of bboxes to draw.
Defaults to 0.3.
no_save_vis (bool): Whether to force not to save prediction
vis results. Defaults to False.
img_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
Returns:
List[np.ndarray] or None: Returns visualization results only if
applicable.
"""
if no_save_vis is True:
img_out_dir = ''
if not show and img_out_dir == '' and not return_vis:
return None
if self.visualizer is None:
            raise ValueError('Visualization needs the "visualizer" term '
                             'defined in the config, but got None.')
results = []
for single_input, pred in zip(inputs, preds):
if isinstance(single_input, str):
img_bytes = mmengine.fileio.get(single_input)
img = mmcv.imfrombytes(img_bytes)
img = img[:, :, ::-1]
img_name = osp.basename(single_input)
elif isinstance(single_input, np.ndarray):
img = single_input.copy()
img_num = str(self.num_visualized_imgs).zfill(8)
img_name = f'{img_num}.jpg'
else:
raise ValueError('Unsupported input type: '
f'{type(single_input)}')
out_file = osp.join(img_out_dir, 'vis',
img_name) if img_out_dir != '' else None
self.visualizer.add_datasample(
img_name,
img,
pred,
show=show,
wait_time=wait_time,
draw_gt=False,
draw_pred=draw_pred,
pred_score_thr=pred_score_thr,
out_file=out_file,
)
results.append(self.visualizer.get_image())
self.num_visualized_imgs += 1
return results
def postprocess(
self,
preds: PredType,
visualization: Optional[List[np.ndarray]] = None,
return_datasample: bool = False,
print_result: bool = False,
no_save_pred: bool = False,
pred_out_dir: str = '',
**kwargs,
) -> Dict:
"""Process the predictions and visualization results from ``forward``
and ``visualize``.
This method should be responsible for the following tasks:
1. Convert datasamples into a json-serializable dict if needed.
2. Pack the predictions and visualization results and return them.
3. Dump or log the predictions.
Args:
preds (List[:obj:`DetDataSample`]): Predictions of the model.
visualization (Optional[np.ndarray]): Visualized predictions.
return_datasample (bool): Whether to use Datasample to store
inference results. If False, dict will be used.
print_result (bool): Whether to print the inference result w/o
visualization to the console. Defaults to False.
no_save_pred (bool): Whether to force not to save prediction
results. Defaults to False.
pred_out_dir: Dir to save the inference results w/o
visualization. If left as empty, no file will be saved.
Defaults to ''.
Returns:
dict: Inference and visualization results with key ``predictions``
and ``visualization``.
- ``visualization`` (Any): Returned by :meth:`visualize`.
- ``predictions`` (dict or DataSample): Returned by
:meth:`forward` and processed in :meth:`postprocess`.
If ``return_datasample=False``, it usually should be a
json-serializable dict containing only basic data elements such
as strings and numbers.
"""
if no_save_pred is True:
pred_out_dir = ''
result_dict = {}
results = preds
if not return_datasample:
results = []
for pred in preds:
result = self.pred2dict(pred, pred_out_dir)
results.append(result)
elif pred_out_dir != '':
warnings.warn('Currently does not support saving datasample '
'when return_datasample is set to True. '
'Prediction results are not saved!')
# Add img to the results after printing and dumping
result_dict['predictions'] = results
if print_result:
print(result_dict)
result_dict['visualization'] = visualization
return result_dict
# TODO: The data format and fields saved in json need further discussion.
# Maybe should include model name, timestamp, filename, image info etc.
def pred2dict(self,
data_sample: DetDataSample,
pred_out_dir: str = '') -> Dict:
"""Extract elements necessary to represent a prediction into a
dictionary.
It's better to contain only basic data elements such as strings and
numbers in order to guarantee it's json-serializable.
Args:
data_sample (:obj:`DetDataSample`): Predictions of the model.
pred_out_dir: Dir to save the inference results w/o
visualization. If left as empty, no file will be saved.
Defaults to ''.
Returns:
dict: Prediction results.
"""
is_save_pred = True
if pred_out_dir == '':
is_save_pred = False
if is_save_pred and 'img_path' in data_sample:
img_path = osp.basename(data_sample.img_path)
img_path = osp.splitext(img_path)[0]
out_img_path = osp.join(pred_out_dir, 'preds',
img_path + '_panoptic_seg.png')
out_json_path = osp.join(pred_out_dir, 'preds', img_path + '.json')
elif is_save_pred:
out_img_path = osp.join(
pred_out_dir, 'preds',
f'{self.num_predicted_imgs}_panoptic_seg.png')
out_json_path = osp.join(pred_out_dir, 'preds',
f'{self.num_predicted_imgs}.json')
self.num_predicted_imgs += 1
result = {}
if 'pred_instances' in data_sample:
masks = data_sample.pred_instances.get('masks')
pred_instances = data_sample.pred_instances.numpy()
result = {
'bboxes': pred_instances.bboxes.tolist(),
'labels': pred_instances.labels.tolist(),
'scores': pred_instances.scores.tolist()
}
if masks is not None:
if pred_instances.bboxes.sum() == 0:
                    # Fake bboxes (e.g. from SOLO); recover them from masks.
bboxes = mask2bbox(masks.cpu()).numpy().tolist()
result['bboxes'] = bboxes
encode_masks = encode_mask_results(pred_instances.masks)
for encode_mask in encode_masks:
if isinstance(encode_mask['counts'], bytes):
encode_mask['counts'] = encode_mask['counts'].decode()
result['masks'] = encode_masks
if 'pred_panoptic_seg' in data_sample:
if VOID is None:
raise RuntimeError(
'panopticapi is not installed, please install it by: '
'pip install git+https://github.com/cocodataset/'
'panopticapi.git.')
pan = data_sample.pred_panoptic_seg.sem_seg.cpu().numpy()[0]
pan[pan % INSTANCE_OFFSET == len(
self.model.dataset_meta['classes'])] = VOID
pan = id2rgb(pan).astype(np.uint8)
if is_save_pred:
mmcv.imwrite(pan[:, :, ::-1], out_img_path)
result['panoptic_seg_path'] = out_img_path
else:
result['panoptic_seg'] = pan
if is_save_pred:
mmengine.dump(result, out_json_path)
return result
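    # --- Usage sketch (kept as comments so module behaviour is unchanged) ---
    # A minimal illustration of the dict layout that ``pred2dict`` produces for
    # an instance-detection result; the concrete numbers are made up:
    #
    #   {
    #       'bboxes': [[59.0, 139.0, 240.0, 332.0]],  # xyxy, one per instance
    #       'labels': [17],                           # class indices
    #       'scores': [0.92],
    #       'masks': [{'size': [480, 640], 'counts': '...'}],  # RLE, if present
    #   }
    #
    # ``postprocess`` collects such dicts under the key ``'predictions'`` and
    # stores the images returned by ``visualize`` under ``'visualization'``.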
| 24,008 | 39.624365 | 79 |
py
|
ERD
|
ERD-main/mmdet/apis/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .det_inferencer import DetInferencer
from .inference import (async_inference_detector, inference_detector,
init_detector)
__all__ = [
'init_detector', 'async_inference_detector', 'inference_detector',
'DetInferencer'
]
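# Usage sketch (kept as comments so importing this package stays side-effect
# free). The model alias and file paths below are placeholders, not guaranteed
# to exist in a given installation:
#
#   from mmdet.apis import DetInferencer, inference_detector, init_detector
#
#   # High-level inferencer API
#   inferencer = DetInferencer(model='rtmdet_tiny_8xb32-300e_coco')
#   inferencer('demo.jpg', out_dir='outputs/')
#
#   # Lower-level API
#   model = init_detector('some_config.py', 'some_checkpoint.pth',
#                         device='cuda:0')
#   result = inference_detector(model, 'demo.jpg')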
| 305 | 29.6 | 70 |
py
|
ERD
|
ERD-main/mmdet/evaluation/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .functional import * # noqa: F401,F403
from .metrics import * # noqa: F401,F403
| 135 | 33 | 47 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/dump_proposals_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import Optional, Sequence
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.structures import InstanceData
from mmdet.registry import METRICS
@METRICS.register_module()
class DumpProposals(BaseMetric):
"""Dump proposals pseudo metric.
Args:
output_dir (str): The root directory for ``proposals_file``.
Defaults to ''.
proposals_file (str): Proposals file path. Defaults to 'proposals.pkl'.
num_max_proposals (int, optional): Maximum number of proposals to dump.
If not specified, all proposals will be dumped.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'dump_proposals'
def __init__(self,
output_dir: str = '',
proposals_file: str = 'proposals.pkl',
num_max_proposals: Optional[int] = None,
file_client_args: dict = None,
backend_args: dict = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
self.num_max_proposals = num_max_proposals
# TODO: update after mmengine finish refactor fileio.
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead, please refer to'
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
self.output_dir = output_dir
assert proposals_file.endswith(('.pkl', '.pickle')), \
'The output file must be a pkl file.'
self.proposals_file = os.path.join(self.output_dir, proposals_file)
if is_main_process():
os.makedirs(self.output_dir, exist_ok=True)
def process(self, data_batch: Sequence[dict],
data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
pred = data_sample['pred_instances']
# `bboxes` is sorted by `scores`
ranked_scores, rank_inds = pred['scores'].sort(descending=True)
ranked_bboxes = pred['bboxes'][rank_inds, :]
ranked_bboxes = ranked_bboxes.cpu().numpy()
ranked_scores = ranked_scores.cpu().numpy()
pred_instance = InstanceData()
pred_instance.bboxes = ranked_bboxes
pred_instance.scores = ranked_scores
if self.num_max_proposals is not None:
pred_instance = pred_instance[:self.num_max_proposals]
img_path = data_sample['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
result = {file_name: pred_instance}
self.results.append(result)
def compute_metrics(self, results: list) -> dict:
"""Dump the processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: An empty dict.
"""
logger: MMLogger = MMLogger.get_current_instance()
dump_results = {}
for result in results:
dump_results.update(result)
dump(
dump_results,
file=self.proposals_file,
backend_args=self.backend_args)
logger.info(f'Results are saved at {self.proposals_file}')
return {}
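    # Config usage sketch (comments only). One plausible way to plug this
    # pseudo metric into an MMDetection config as the test evaluator in order
    # to dump proposals to disk; the paths below are placeholders:
    #
    #   test_evaluator = dict(
    #       type='DumpProposals',
    #       output_dir='work_dirs/proposals',
    #       proposals_file='rpn_proposals.pkl',
    #       num_max_proposals=1000)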
| 4,984 | 40.541667 | 125 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/coco_occluded_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Union
import mmengine
import numpy as np
from mmengine.fileio import load
from mmengine.logging import print_log
from pycocotools import mask as coco_mask
from terminaltables import AsciiTable
from mmdet.registry import METRICS
from .coco_metric import CocoMetric
@METRICS.register_module()
class CocoOccludedSeparatedMetric(CocoMetric):
"""Metric of separated and occluded masks which presented in paper `A Tri-
Layer Plugin to Improve Occluded Detection.
<https://arxiv.org/abs/2210.10046>`_.
Separated COCO and Occluded COCO are automatically generated subsets of
COCO val dataset, collecting separated objects and partially occluded
objects for a large variety of categories. In this way, we define
occlusion into two major categories: separated and partially occluded.
- Separation: target object segmentation mask is separated into distinct
regions by the occluder.
- Partial Occlusion: target object is partially occluded but the
segmentation mask is connected.
    These two new scalable real-image datasets are used to benchmark a model's
capability to detect occluded objects of 80 common categories.
Please cite the paper if you use this dataset:
@article{zhan2022triocc,
title={A Tri-Layer Plugin to Improve Occluded Detection},
author={Zhan, Guanqi and Xie, Weidi and Zisserman, Andrew},
journal={British Machine Vision Conference},
year={2022}
}
Args:
occluded_ann (str): Path to the occluded coco annotation file.
separated_ann (str): Path to the separated coco annotation file.
score_thr (float): Score threshold of the detection masks.
Defaults to 0.3.
iou_thr (float): IoU threshold for the recall calculation.
Defaults to 0.75.
metric (str | List[str]): Metrics to be evaluated. Valid metrics
include 'bbox', 'segm', 'proposal', and 'proposal_fast'.
Defaults to 'bbox'.
"""
default_prefix: Optional[str] = 'coco'
def __init__(
self,
*args,
occluded_ann:
str = 'https://www.robots.ox.ac.uk/~vgg/research/tpod/datasets/occluded_coco.pkl', # noqa
separated_ann:
str = 'https://www.robots.ox.ac.uk/~vgg/research/tpod/datasets/separated_coco.pkl', # noqa
score_thr: float = 0.3,
iou_thr: float = 0.75,
metric: Union[str, List[str]] = ['bbox', 'segm'],
**kwargs) -> None:
super().__init__(*args, metric=metric, **kwargs)
self.occluded_ann = load(occluded_ann)
self.separated_ann = load(separated_ann)
self.score_thr = score_thr
self.iou_thr = iou_thr
def compute_metrics(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
Dict[str, float]: The computed metrics. The keys are the names of
the metrics, and the values are corresponding results.
"""
coco_metric_res = super().compute_metrics(results)
eval_res = self.evaluate_occluded_separated(results)
coco_metric_res.update(eval_res)
return coco_metric_res
def evaluate_occluded_separated(self, results: List[tuple]) -> dict:
"""Compute the recall of occluded and separated masks.
Args:
results (list[tuple]): Testing results of the dataset.
Returns:
dict[str, float]: The recall of occluded and separated masks.
"""
dict_det = {}
print_log('processing detection results...')
prog_bar = mmengine.ProgressBar(len(results))
for i in range(len(results)):
gt, dt = results[i]
img_id = dt['img_id']
cur_img_name = self._coco_api.imgs[img_id]['file_name']
if cur_img_name not in dict_det.keys():
dict_det[cur_img_name] = []
for bbox, score, label, mask in zip(dt['bboxes'], dt['scores'],
dt['labels'], dt['masks']):
cur_binary_mask = coco_mask.decode(mask)
dict_det[cur_img_name].append([
score, self.dataset_meta['classes'][label],
cur_binary_mask, bbox
])
dict_det[cur_img_name].sort(
key=lambda x: (-x[0], x[3][0], x[3][1])
            )  # rank by confidence from high to low, break ties by bbox coords
prog_bar.update()
print_log('\ncomputing occluded mask recall...', logger='current')
occluded_correct_num, occluded_recall = self.compute_recall(
dict_det, gt_ann=self.occluded_ann, is_occ=True)
print_log(
f'\nCOCO occluded mask recall: {occluded_recall:.2f}%',
logger='current')
print_log(
f'COCO occluded mask success num: {occluded_correct_num}',
logger='current')
print_log('computing separated mask recall...', logger='current')
separated_correct_num, separated_recall = self.compute_recall(
dict_det, gt_ann=self.separated_ann, is_occ=False)
print_log(
f'\nCOCO separated mask recall: {separated_recall:.2f}%',
logger='current')
print_log(
f'COCO separated mask success num: {separated_correct_num}',
logger='current')
table_data = [
['mask type', 'recall', 'num correct'],
['occluded', f'{occluded_recall:.2f}%', occluded_correct_num],
['separated', f'{separated_recall:.2f}%', separated_correct_num]
]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger='current')
return dict(
occluded_recall=occluded_recall, separated_recall=separated_recall)
def compute_recall(self,
result_dict: dict,
gt_ann: list,
is_occ: bool = True) -> tuple:
"""Compute the recall of occluded or separated masks.
Args:
result_dict (dict): Processed mask results.
gt_ann (list): Occluded or separated coco annotations.
is_occ (bool): Whether the annotation is occluded mask.
Defaults to True.
Returns:
tuple: number of correct masks and the recall.
"""
correct = 0
prog_bar = mmengine.ProgressBar(len(gt_ann))
for iter_i in range(len(gt_ann)):
cur_item = gt_ann[iter_i]
cur_img_name = cur_item[0]
cur_gt_bbox = cur_item[3]
if is_occ:
cur_gt_bbox = [
cur_gt_bbox[0], cur_gt_bbox[1],
cur_gt_bbox[0] + cur_gt_bbox[2],
cur_gt_bbox[1] + cur_gt_bbox[3]
]
cur_gt_class = cur_item[1]
cur_gt_mask = coco_mask.decode(cur_item[4])
assert cur_img_name in result_dict.keys()
cur_detections = result_dict[cur_img_name]
correct_flag = False
for i in range(len(cur_detections)):
cur_det_confidence = cur_detections[i][0]
if cur_det_confidence < self.score_thr:
break
cur_det_class = cur_detections[i][1]
if cur_det_class != cur_gt_class:
continue
cur_det_mask = cur_detections[i][2]
cur_iou = self.mask_iou(cur_det_mask, cur_gt_mask)
if cur_iou >= self.iou_thr:
correct_flag = True
break
if correct_flag:
correct += 1
prog_bar.update()
recall = correct / len(gt_ann) * 100
return correct, recall
def mask_iou(self, mask1: np.ndarray, mask2: np.ndarray) -> np.ndarray:
"""Compute IoU between two masks."""
mask1_area = np.count_nonzero(mask1 == 1)
mask2_area = np.count_nonzero(mask2 == 1)
intersection = np.count_nonzero(np.logical_and(mask1 == 1, mask2 == 1))
iou = intersection / (mask1_area + mask2_area - intersection)
return iou
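    # Worked example (comments only): for the toy binary masks
    #   mask1 = [[1, 1],        mask2 = [[1, 0],
    #            [0, 0]]                 [1, 0]]
    # the areas are 2 and 2 and the intersection is 1, so
    #   iou = 1 / (2 + 2 - 1) = 1 / 3 ≈ 0.33,
    # which is the value ``mask_iou`` returns for these inputs.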
| 8,370 | 39.834146 | 99 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/coco_panoptic_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import itertools
import os.path as osp
import tempfile
from typing import Dict, Optional, Sequence, Tuple, Union
import mmcv
import numpy as np
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump, get_local_path, load
from mmengine.logging import MMLogger, print_log
from terminaltables import AsciiTable
from mmdet.datasets.api_wrappers import COCOPanoptic
from mmdet.registry import METRICS
from ..functional import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
try:
import panopticapi
from panopticapi.evaluation import VOID, PQStat
from panopticapi.utils import id2rgb, rgb2id
except ImportError:
panopticapi = None
id2rgb = None
rgb2id = None
VOID = None
PQStat = None
@METRICS.register_module()
class CocoPanopticMetric(BaseMetric):
"""COCO panoptic segmentation evaluation metric.
    Evaluate PQ, SQ and RQ for panoptic segmentation tasks. Please refer to
https://cocodataset.org/#panoptic-eval for more details.
Args:
ann_file (str, optional): Path to the coco format annotation file.
If not specified, ground truth annotations from the dataset will
be converted to coco format. Defaults to None.
seg_prefix (str, optional): Path to the directory which contains the
            coco panoptic segmentation masks. It should be specified when
            evaluating. Defaults to None.
classwise (bool): Whether to evaluate the metric class-wise.
Defaults to False.
outfile_prefix (str, optional): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created.
It should be specified when format_only is True. Defaults to None.
        format_only (bool): Format the output results without performing
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
Defaults to False.
nproc (int): Number of processes for panoptic quality computing.
Defaults to 32. When ``nproc`` exceeds the number of cpu cores,
the number of cpu cores is used.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'coco_panoptic'
def __init__(self,
ann_file: Optional[str] = None,
seg_prefix: Optional[str] = None,
classwise: bool = False,
format_only: bool = False,
outfile_prefix: Optional[str] = None,
nproc: int = 32,
file_client_args: dict = None,
backend_args: dict = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
if panopticapi is None:
raise RuntimeError(
'panopticapi is not installed, please install it by: '
'pip install git+https://github.com/cocodataset/'
'panopticapi.git.')
super().__init__(collect_device=collect_device, prefix=prefix)
self.classwise = classwise
self.format_only = format_only
if self.format_only:
            assert outfile_prefix is not None, \
                'outfile_prefix must not be None when format_only is True, ' \
                'otherwise the result files will be saved to a temp ' \
                'directory which will be cleaned up at the end.'
self.tmp_dir = None
# outfile_prefix should be a prefix of a path which points to a shared
# storage when train or test with multi nodes.
self.outfile_prefix = outfile_prefix
if outfile_prefix is None:
self.tmp_dir = tempfile.TemporaryDirectory()
self.outfile_prefix = osp.join(self.tmp_dir.name, 'results')
# the directory to save predicted panoptic segmentation mask
self.seg_out_dir = f'{self.outfile_prefix}.panoptic'
self.nproc = nproc
self.seg_prefix = seg_prefix
self.cat_ids = None
self.cat2label = None
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead, please refer to'
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
if ann_file:
with get_local_path(
ann_file, backend_args=self.backend_args) as local_path:
self._coco_api = COCOPanoptic(local_path)
self.categories = self._coco_api.cats
else:
self._coco_api = None
self.categories = None
def __del__(self) -> None:
"""Clean up."""
if self.tmp_dir is not None:
self.tmp_dir.cleanup()
def gt_to_coco_json(self, gt_dicts: Sequence[dict],
outfile_prefix: str) -> Tuple[str, str]:
"""Convert ground truth to coco panoptic segmentation format json file.
Args:
gt_dicts (Sequence[dict]): Ground truth of the dataset.
outfile_prefix (str): The filename prefix of the json file. If the
prefix is "somepath/xxx", the json file will be named
"somepath/xxx.gt.json".
Returns:
Tuple[str, str]: The filename of the json file and the name of the\
directory which contains panoptic segmentation masks.
"""
assert len(gt_dicts) > 0, 'gt_dicts is empty.'
gt_folder = osp.dirname(gt_dicts[0]['seg_map_path'])
converted_json_path = f'{outfile_prefix}.gt.json'
categories = []
for id, name in enumerate(self.dataset_meta['classes']):
isthing = 1 if name in self.dataset_meta['thing_classes'] else 0
categories.append({'id': id, 'name': name, 'isthing': isthing})
image_infos = []
annotations = []
for gt_dict in gt_dicts:
img_id = gt_dict['image_id']
image_info = {
'id': img_id,
'width': gt_dict['width'],
'height': gt_dict['height'],
'file_name': osp.split(gt_dict['seg_map_path'])[-1]
}
image_infos.append(image_info)
pan_png = mmcv.imread(gt_dict['seg_map_path']).squeeze()
pan_png = pan_png[:, :, ::-1]
pan_png = rgb2id(pan_png)
segments_info = []
for segment_info in gt_dict['segments_info']:
id = segment_info['id']
label = segment_info['category']
mask = pan_png == id
isthing = categories[label]['isthing']
if isthing:
iscrowd = 1 if not segment_info['is_thing'] else 0
else:
iscrowd = 0
new_segment_info = {
'id': id,
'category_id': label,
'isthing': isthing,
'iscrowd': iscrowd,
'area': mask.sum()
}
segments_info.append(new_segment_info)
segm_file = image_info['file_name'].replace('jpg', 'png')
annotation = dict(
image_id=img_id,
segments_info=segments_info,
file_name=segm_file)
annotations.append(annotation)
pan_png = id2rgb(pan_png)
info = dict(
date_created=str(datetime.datetime.now()),
description='Coco json file converted by mmdet CocoPanopticMetric.'
)
coco_json = dict(
info=info,
images=image_infos,
categories=categories,
licenses=None,
)
if len(annotations) > 0:
coco_json['annotations'] = annotations
dump(coco_json, converted_json_path)
return converted_json_path, gt_folder
def result2json(self, results: Sequence[dict],
outfile_prefix: str) -> Tuple[str, str]:
"""Dump the panoptic results to a COCO style json file and a directory.
Args:
results (Sequence[dict]): Testing results of the dataset.
outfile_prefix (str): The filename prefix of the json files and the
directory.
Returns:
Tuple[str, str]: The json file and the directory which contains \
panoptic segmentation masks. The filename of the json is
"somepath/xxx.panoptic.json" and name of the directory is
"somepath/xxx.panoptic".
"""
label2cat = dict((v, k) for (k, v) in self.cat2label.items())
pred_annotations = []
for idx in range(len(results)):
result = results[idx]
for segment_info in result['segments_info']:
sem_label = segment_info['category_id']
# convert sem_label to json label
cat_id = label2cat[sem_label]
segment_info['category_id'] = label2cat[sem_label]
is_thing = self.categories[cat_id]['isthing']
segment_info['isthing'] = is_thing
pred_annotations.append(result)
pan_json_results = dict(annotations=pred_annotations)
json_filename = f'{outfile_prefix}.panoptic.json'
dump(pan_json_results, json_filename)
return json_filename, (
self.seg_out_dir
if self.tmp_dir is None else tempfile.gettempdir())
def _parse_predictions(self,
pred: dict,
img_id: int,
segm_file: str,
label2cat=None) -> dict:
"""Parse panoptic segmentation predictions.
Args:
pred (dict): Panoptic segmentation predictions.
img_id (int): Image id.
segm_file (str): Segmentation file name.
label2cat (dict): Mapping from label to category id.
Defaults to None.
Returns:
dict: Parsed predictions.
"""
result = dict()
result['img_id'] = img_id
# shape (1, H, W) -> (H, W)
pan = pred['pred_panoptic_seg']['sem_seg'].cpu().numpy()[0]
pan_labels = np.unique(pan)
segments_info = []
for pan_label in pan_labels:
sem_label = pan_label % INSTANCE_OFFSET
# We reserve the length of dataset_meta['classes'] for VOID label
if sem_label == len(self.dataset_meta['classes']):
continue
mask = pan == pan_label
area = mask.sum()
segments_info.append({
'id':
int(pan_label),
# when ann_file provided, sem_label should be cat_id, otherwise
# sem_label should be a continuous id, not the cat_id
# defined in dataset
'category_id':
label2cat[sem_label] if label2cat else sem_label,
'area':
int(area)
})
# evaluation script uses 0 for VOID label.
pan[pan % INSTANCE_OFFSET == len(self.dataset_meta['classes'])] = VOID
pan = id2rgb(pan).astype(np.uint8)
mmcv.imwrite(pan[:, :, ::-1], osp.join(self.seg_out_dir, segm_file))
result = {
'image_id': img_id,
'segments_info': segments_info,
'file_name': segm_file
}
return result
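    # Worked example (comments only): panoptic ids in this codebase pack the
    # semantic label and the instance index together as
    #   pan_label = sem_label + instance_id * INSTANCE_OFFSET
    # so with e.g. INSTANCE_OFFSET = 1000, sem_label = 17 and instance_id = 2,
    # the stored id is 2017 and ``pan_label % INSTANCE_OFFSET`` recovers 17.
    # The label equal to ``len(dataset_meta['classes'])`` is reserved for VOID.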
def _compute_batch_pq_stats(self, data_samples: Sequence[dict]):
"""Process gts and predictions when ``outfile_prefix`` is not set, gts
are from dataset or a json file which is defined by ``ann_file``.
Intermediate results, ``pq_stats``, are computed here and put into
``self.results``.
"""
if self._coco_api is None:
categories = dict()
for id, name in enumerate(self.dataset_meta['classes']):
isthing = 1 if name in self.dataset_meta['thing_classes']\
else 0
categories[id] = {'id': id, 'name': name, 'isthing': isthing}
label2cat = None
else:
categories = self.categories
cat_ids = self._coco_api.get_cat_ids(
cat_names=self.dataset_meta['classes'])
label2cat = {i: cat_id for i, cat_id in enumerate(cat_ids)}
for data_sample in data_samples:
# parse pred
img_id = data_sample['img_id']
segm_file = osp.basename(data_sample['img_path']).replace(
'jpg', 'png')
result = self._parse_predictions(
pred=data_sample,
img_id=img_id,
segm_file=segm_file,
label2cat=label2cat)
# parse gt
gt = dict()
gt['image_id'] = img_id
gt['width'] = data_sample['ori_shape'][1]
gt['height'] = data_sample['ori_shape'][0]
gt['file_name'] = segm_file
if self._coco_api is None:
# get segments_info from data_sample
seg_map_path = osp.join(self.seg_prefix, segm_file)
pan_png = mmcv.imread(seg_map_path).squeeze()
pan_png = pan_png[:, :, ::-1]
pan_png = rgb2id(pan_png)
segments_info = []
for segment_info in data_sample['segments_info']:
id = segment_info['id']
label = segment_info['category']
mask = pan_png == id
isthing = categories[label]['isthing']
if isthing:
iscrowd = 1 if not segment_info['is_thing'] else 0
else:
iscrowd = 0
new_segment_info = {
'id': id,
'category_id': label,
'isthing': isthing,
'iscrowd': iscrowd,
'area': mask.sum()
}
segments_info.append(new_segment_info)
else:
# get segments_info from annotation file
segments_info = self._coco_api.imgToAnns[img_id]
gt['segments_info'] = segments_info
pq_stats = pq_compute_single_core(
proc_id=0,
annotation_set=[(gt, result)],
gt_folder=self.seg_prefix,
pred_folder=self.seg_out_dir,
categories=categories,
backend_args=self.backend_args)
self.results.append(pq_stats)
def _process_gt_and_predictions(self, data_samples: Sequence[dict]):
"""Process gts and predictions when ``outfile_prefix`` is set.
        The predictions will be saved to the directory specified by
        ``outfile_prefix``. The matched pair (gt, result) will be put into
``self.results``.
"""
for data_sample in data_samples:
# parse pred
img_id = data_sample['img_id']
segm_file = osp.basename(data_sample['img_path']).replace(
'jpg', 'png')
result = self._parse_predictions(
pred=data_sample, img_id=img_id, segm_file=segm_file)
# parse gt
gt = dict()
gt['image_id'] = img_id
gt['width'] = data_sample['ori_shape'][1]
gt['height'] = data_sample['ori_shape'][0]
if self._coco_api is None:
# get segments_info from dataset
gt['segments_info'] = data_sample['segments_info']
gt['seg_map_path'] = data_sample['seg_map_path']
self.results.append((gt, result))
# TODO: data_batch is no longer needed, consider adjusting the
# parameter position
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
        # If ``self.tmp_dir`` is None, it will save gt and predictions to
# self.results, otherwise, it will compute pq_stats here.
if self.tmp_dir is None:
self._process_gt_and_predictions(data_samples)
else:
self._compute_batch_pq_stats(data_samples)
def compute_metrics(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch. There
are two cases:
- When ``outfile_prefix`` is not provided, the elements in
results are pq_stats which can be summed directly to get PQ.
- When ``outfile_prefix`` is provided, the elements in
results are tuples like (gt, pred).
Returns:
Dict[str, float]: The computed metrics. The keys are the names of
the metrics, and the values are corresponding results.
"""
logger: MMLogger = MMLogger.get_current_instance()
if self.tmp_dir is None:
# do evaluation after collect all the results
# split gt and prediction list
gts, preds = zip(*results)
if self._coco_api is None:
# use converted gt json file to initialize coco api
logger.info('Converting ground truth to coco format...')
coco_json_path, gt_folder = self.gt_to_coco_json(
gt_dicts=gts, outfile_prefix=self.outfile_prefix)
self._coco_api = COCOPanoptic(coco_json_path)
else:
gt_folder = self.seg_prefix
self.cat_ids = self._coco_api.get_cat_ids(
cat_names=self.dataset_meta['classes'])
self.cat2label = {
cat_id: i
for i, cat_id in enumerate(self.cat_ids)
}
self.img_ids = self._coco_api.get_img_ids()
self.categories = self._coco_api.cats
# convert predictions to coco format and dump to json file
json_filename, pred_folder = self.result2json(
results=preds, outfile_prefix=self.outfile_prefix)
if self.format_only:
logger.info('results are saved in '
f'{osp.dirname(self.outfile_prefix)}')
return dict()
imgs = self._coco_api.imgs
gt_json = self._coco_api.img_ann_map
gt_json = [{
'image_id': k,
'segments_info': v,
'file_name': imgs[k]['segm_file']
} for k, v in gt_json.items()]
pred_json = load(json_filename)
pred_json = dict(
(el['image_id'], el) for el in pred_json['annotations'])
# match the gt_anns and pred_anns in the same image
matched_annotations_list = []
for gt_ann in gt_json:
img_id = gt_ann['image_id']
if img_id not in pred_json.keys():
raise Exception('no prediction for the image'
' with id: {}'.format(img_id))
matched_annotations_list.append((gt_ann, pred_json[img_id]))
pq_stat = pq_compute_multi_core(
matched_annotations_list,
gt_folder,
pred_folder,
self.categories,
backend_args=self.backend_args,
nproc=self.nproc)
else:
# aggregate the results generated in process
if self._coco_api is None:
categories = dict()
for id, name in enumerate(self.dataset_meta['classes']):
isthing = 1 if name in self.dataset_meta[
'thing_classes'] else 0
categories[id] = {
'id': id,
'name': name,
'isthing': isthing
}
self.categories = categories
pq_stat = PQStat()
for result in results:
pq_stat += result
metrics = [('All', None), ('Things', True), ('Stuff', False)]
pq_results = {}
for name, isthing in metrics:
pq_results[name], classwise_results = pq_stat.pq_average(
self.categories, isthing=isthing)
if name == 'All':
pq_results['classwise'] = classwise_results
classwise_results = None
if self.classwise:
classwise_results = {
k: v
for k, v in zip(self.dataset_meta['classes'],
pq_results['classwise'].values())
}
print_panoptic_table(pq_results, classwise_results, logger=logger)
results = parse_pq_results(pq_results)
return results
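# Config usage sketch (comments only). A typical way to wire this metric into
# an MMDetection config; the annotation and prefix paths are placeholders:
#
#   val_evaluator = dict(
#       type='CocoPanopticMetric',
#       ann_file='data/coco/annotations/panoptic_val2017.json',
#       seg_prefix='data/coco/annotations/panoptic_val2017/',
#       backend_args=None)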
def parse_pq_results(pq_results: dict) -> dict:
"""Parse the Panoptic Quality results.
Args:
pq_results (dict): Panoptic Quality results.
Returns:
dict: Panoptic Quality results parsed.
"""
result = dict()
result['PQ'] = 100 * pq_results['All']['pq']
result['SQ'] = 100 * pq_results['All']['sq']
result['RQ'] = 100 * pq_results['All']['rq']
result['PQ_th'] = 100 * pq_results['Things']['pq']
result['SQ_th'] = 100 * pq_results['Things']['sq']
result['RQ_th'] = 100 * pq_results['Things']['rq']
result['PQ_st'] = 100 * pq_results['Stuff']['pq']
result['SQ_st'] = 100 * pq_results['Stuff']['sq']
result['RQ_st'] = 100 * pq_results['Stuff']['rq']
return result
def print_panoptic_table(
pq_results: dict,
classwise_results: Optional[dict] = None,
logger: Optional[Union['MMLogger', str]] = None) -> None:
"""Print the panoptic evaluation results table.
Args:
        pq_results (dict): The Panoptic Quality results.
        classwise_results (dict, optional): The classwise Panoptic Quality
            results. The keys are class names and the values are metrics.
Defaults to None.
logger (:obj:`MMLogger` | str, optional): Logger used for printing
related information during evaluation. Default: None.
"""
headers = ['', 'PQ', 'SQ', 'RQ', 'categories']
data = [headers]
for name in ['All', 'Things', 'Stuff']:
numbers = [
f'{(pq_results[name][k] * 100):0.3f}' for k in ['pq', 'sq', 'rq']
]
row = [name] + numbers + [pq_results[name]['n']]
data.append(row)
table = AsciiTable(data)
print_log('Panoptic Evaluation Results:\n' + table.table, logger=logger)
if classwise_results is not None:
class_metrics = [(name, ) + tuple(f'{(metrics[k] * 100):0.3f}'
for k in ['pq', 'sq', 'rq'])
for name, metrics in classwise_results.items()]
num_columns = min(8, len(class_metrics) * 4)
results_flatten = list(itertools.chain(*class_metrics))
headers = ['category', 'PQ', 'SQ', 'RQ'] * (num_columns // 4)
results_2d = itertools.zip_longest(
*[results_flatten[i::num_columns] for i in range(num_columns)])
data = [headers]
data += [result for result in results_2d]
table = AsciiTable(data)
print_log(
'Classwise Panoptic Evaluation Results:\n' + table.table,
logger=logger)
| 24,814 | 39.48124 | 125 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/coco_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import itertools
import os.path as osp
import tempfile
from collections import OrderedDict
from typing import Dict, List, Optional, Sequence, Union
import numpy as np
import torch
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump, get_local_path, load
from mmengine.logging import MMLogger
from terminaltables import AsciiTable
from mmdet.datasets.api_wrappers import COCO, COCOeval
from mmdet.registry import METRICS
from mmdet.structures.mask import encode_mask_results
from ..functional import eval_recalls
@METRICS.register_module()
class CocoMetric(BaseMetric):
"""COCO evaluation metric.
Evaluate AR, AP, and mAP for detection tasks including proposal/box
detection and instance segmentation. Please refer to
https://cocodataset.org/#detection-eval for more details.
Args:
ann_file (str, optional): Path to the coco format annotation file.
If not specified, ground truth annotations from the dataset will
be converted to coco format. Defaults to None.
metric (str | List[str]): Metrics to be evaluated. Valid metrics
include 'bbox', 'segm', 'proposal', and 'proposal_fast'.
Defaults to 'bbox'.
classwise (bool): Whether to evaluate the metric class-wise.
Defaults to False.
proposal_nums (Sequence[int]): Numbers of proposals to be evaluated.
Defaults to (100, 300, 1000).
iou_thrs (float | List[float], optional): IoU threshold to compute AP
and AR. If not specified, IoUs from 0.5 to 0.95 will be used.
Defaults to None.
metric_items (List[str], optional): Metric result names to be
recorded in the evaluation result. Defaults to None.
        format_only (bool): Format the output results without performing
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
Defaults to False.
outfile_prefix (str, optional): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Defaults to None.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
        sort_categories (bool): Whether to sort categories in annotations. Only
used for `Objects365V1Dataset`. Defaults to False.
"""
default_prefix: Optional[str] = 'coco'
def __init__(self,
ann_file: Optional[str] = None,
metric: Union[str, List[str]] = 'bbox',
classwise: bool = False,
proposal_nums: Sequence[int] = (100, 300, 1000),
iou_thrs: Optional[Union[float, Sequence[float]]] = None,
metric_items: Optional[Sequence[str]] = None,
format_only: bool = False,
outfile_prefix: Optional[str] = None,
file_client_args: dict = None,
backend_args: dict = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None,
sort_categories: bool = False) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
# coco evaluation metrics
self.metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in self.metrics:
if metric not in allowed_metrics:
raise KeyError(
"metric should be one of 'bbox', 'segm', 'proposal', "
f"'proposal_fast', but got {metric}.")
# do class wise evaluation, default False
self.classwise = classwise
# proposal_nums used to compute recall or precision.
self.proposal_nums = list(proposal_nums)
# iou_thrs used to compute recall or precision.
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.iou_thrs = iou_thrs
self.metric_items = metric_items
self.format_only = format_only
if self.format_only:
            assert outfile_prefix is not None, \
                'outfile_prefix must not be None when format_only is True, ' \
                'otherwise the result files will be saved to a temp ' \
                'directory which will be cleaned up at the end.'
self.outfile_prefix = outfile_prefix
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead, please refer to'
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
# if ann_file is not specified,
# initialize coco api with the converted dataset
if ann_file is not None:
with get_local_path(
ann_file, backend_args=self.backend_args) as local_path:
self._coco_api = COCO(local_path)
if sort_categories:
# 'categories' list in objects365_train.json and
# objects365_val.json is inconsistent, need sort
# list(or dict) before get cat_ids.
cats = self._coco_api.cats
sorted_cats = {i: cats[i] for i in sorted(cats)}
self._coco_api.cats = sorted_cats
categories = self._coco_api.dataset['categories']
sorted_categories = sorted(
categories, key=lambda i: i['id'])
self._coco_api.dataset['categories'] = sorted_categories
else:
self._coco_api = None
# handle dataset lazy init
self.cat_ids = None
self.img_ids = None
def fast_eval_recall(self,
results: List[dict],
proposal_nums: Sequence[int],
iou_thrs: Sequence[float],
logger: Optional[MMLogger] = None) -> np.ndarray:
"""Evaluate proposal recall with COCO's fast_eval_recall.
Args:
results (List[dict]): Results of the dataset.
proposal_nums (Sequence[int]): Proposal numbers used for
evaluation.
iou_thrs (Sequence[float]): IoU thresholds used for evaluation.
logger (MMLogger, optional): Logger used for logging the recall
summary.
Returns:
np.ndarray: Averaged recall results.
"""
gt_bboxes = []
pred_bboxes = [result['bboxes'] for result in results]
for i in range(len(self.img_ids)):
ann_ids = self._coco_api.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self._coco_api.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, pred_bboxes, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def xyxy2xywh(self, bbox: np.ndarray) -> list:
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox: List = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
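    # Worked example (comments only): an ``xyxy`` box [10.0, 20.0, 50.0, 80.0]
    # becomes [10.0, 20.0, 40.0, 60.0] in COCO ``xywh`` form, i.e. the last two
    # entries turn into width (50 - 10) and height (80 - 20).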
def results2json(self, results: Sequence[dict],
outfile_prefix: str) -> dict:
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (Sequence[dict]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict: Possible keys are "bbox", "segm", "proposal", and
values are corresponding filenames.
"""
bbox_json_results = []
segm_json_results = [] if 'masks' in results[0] else None
for idx, result in enumerate(results):
image_id = result.get('img_id', idx)
labels = result['labels']
bboxes = result['bboxes']
scores = result['scores']
# bbox results
for i, label in enumerate(labels):
data = dict()
data['image_id'] = image_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(scores[i])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
if segm_json_results is None:
continue
# segm results
masks = result['masks']
mask_scores = result.get('mask_scores', scores)
for i, label in enumerate(labels):
data = dict()
data['image_id'] = image_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_scores[i])
data['category_id'] = self.cat_ids[label]
if isinstance(masks[i]['counts'], bytes):
masks[i]['counts'] = masks[i]['counts'].decode()
data['segmentation'] = masks[i]
segm_json_results.append(data)
result_files = dict()
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
dump(bbox_json_results, result_files['bbox'])
if segm_json_results is not None:
result_files['segm'] = f'{outfile_prefix}.segm.json'
dump(segm_json_results, result_files['segm'])
return result_files
def gt_to_coco_json(self, gt_dicts: Sequence[dict],
outfile_prefix: str) -> str:
"""Convert ground truth to coco format json file.
Args:
gt_dicts (Sequence[dict]): Ground truth of the dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json file will be named
"somepath/xxx.gt.json".
Returns:
str: The filename of the json file.
"""
categories = [
dict(id=id, name=name)
for id, name in enumerate(self.dataset_meta['classes'])
]
image_infos = []
annotations = []
for idx, gt_dict in enumerate(gt_dicts):
img_id = gt_dict.get('img_id', idx)
image_info = dict(
id=img_id,
width=gt_dict['width'],
height=gt_dict['height'],
file_name='')
image_infos.append(image_info)
for ann in gt_dict['anns']:
label = ann['bbox_label']
bbox = ann['bbox']
coco_bbox = [
bbox[0],
bbox[1],
bbox[2] - bbox[0],
bbox[3] - bbox[1],
]
annotation = dict(
id=len(annotations) +
1, # coco api requires id starts with 1
image_id=img_id,
bbox=coco_bbox,
iscrowd=ann.get('ignore_flag', 0),
category_id=int(label),
area=coco_bbox[2] * coco_bbox[3])
if ann.get('mask', None):
mask = ann['mask']
# area = mask_util.area(mask)
if isinstance(mask, dict) and isinstance(
mask['counts'], bytes):
mask['counts'] = mask['counts'].decode()
annotation['segmentation'] = mask
# annotation['area'] = float(area)
annotations.append(annotation)
info = dict(
date_created=str(datetime.datetime.now()),
description='Coco json file converted by mmdet CocoMetric.')
coco_json = dict(
info=info,
images=image_infos,
categories=categories,
licenses=None,
)
if len(annotations) > 0:
coco_json['annotations'] = annotations
converted_json_path = f'{outfile_prefix}.gt.json'
dump(coco_json, converted_json_path)
return converted_json_path
# TODO: data_batch is no longer needed, consider adjusting the
# parameter position
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
result = dict()
pred = data_sample['pred_instances']
result['img_id'] = data_sample['img_id']
result['bboxes'] = pred['bboxes'].cpu().numpy()
result['scores'] = pred['scores'].cpu().numpy()
result['labels'] = pred['labels'].cpu().numpy()
# encode mask to RLE
if 'masks' in pred:
result['masks'] = encode_mask_results(
pred['masks'].detach().cpu().numpy()) if isinstance(
pred['masks'], torch.Tensor) else pred['masks']
# some detectors use different scores for bbox and mask
if 'mask_scores' in pred:
result['mask_scores'] = pred['mask_scores'].cpu().numpy()
# parse gt
gt = dict()
gt['width'] = data_sample['ori_shape'][1]
gt['height'] = data_sample['ori_shape'][0]
gt['img_id'] = data_sample['img_id']
if self._coco_api is None:
# TODO: Need to refactor to support LoadAnnotations
assert 'instances' in data_sample, \
'ground truth is required for evaluation when ' \
'`ann_file` is not provided'
gt['anns'] = data_sample['instances']
# add converted result to the results list
self.results.append((gt, result))
def compute_metrics(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
Dict[str, float]: The computed metrics. The keys are the names of
the metrics, and the values are corresponding results.
"""
logger: MMLogger = MMLogger.get_current_instance()
# split gt and prediction list
gts, preds = zip(*results)
tmp_dir = None
if self.outfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
outfile_prefix = osp.join(tmp_dir.name, 'results')
else:
outfile_prefix = self.outfile_prefix
if self._coco_api is None:
# use converted gt json file to initialize coco api
logger.info('Converting ground truth to coco format...')
coco_json_path = self.gt_to_coco_json(
gt_dicts=gts, outfile_prefix=outfile_prefix)
self._coco_api = COCO(coco_json_path)
# handle lazy init
if self.cat_ids is None:
self.cat_ids = self._coco_api.get_cat_ids(
cat_names=self.dataset_meta['classes'])
if self.img_ids is None:
self.img_ids = self._coco_api.get_img_ids()
# convert predictions to coco format and dump to json file
result_files = self.results2json(preds, outfile_prefix)
eval_results = OrderedDict()
if self.format_only:
logger.info('results are saved in '
f'{osp.dirname(outfile_prefix)}')
return eval_results
for metric in self.metrics:
logger.info(f'Evaluating {metric}...')
# TODO: May refactor fast_eval_recall to an independent metric?
# fast eval recall
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
preds, self.proposal_nums, self.iou_thrs, logger=logger)
log_msg = []
for i, num in enumerate(self.proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
logger.info(log_msg)
continue
# evaluate proposal, bbox and segm
iou_type = 'bbox' if metric == 'proposal' else metric
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
predictions = load(result_files[metric])
if iou_type == 'segm':
# Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa
# When evaluating mask AP, if the results contain bbox,
# cocoapi will use the box area instead of the mask area
# for calculating the instance area. Though the overall AP
# is not affected, this leads to different
# small/medium/large mask AP results.
for x in predictions:
x.pop('bbox')
coco_dt = self._coco_api.loadRes(predictions)
except IndexError:
logger.error(
                    'The testing results of the whole dataset are empty.')
break
coco_eval = COCOeval(self._coco_api, coco_dt, iou_type)
coco_eval.params.catIds = self.cat_ids
coco_eval.params.imgIds = self.img_ids
coco_eval.params.maxDets = list(self.proposal_nums)
coco_eval.params.iouThrs = self.iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
metric_items = self.metric_items
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item "{metric_item}" is not supported')
if metric == 'proposal':
coco_eval.params.useCats = 0
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{coco_eval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
if self.classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = coco_eval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, cat_id in enumerate(self.cat_ids):
t = []
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self._coco_api.loadCats(cat_id)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
t.append(f'{nm["name"]}')
t.append(f'{round(ap, 3)}')
eval_results[f'{nm["name"]}_precision'] = round(ap, 3)
# indexes of IoU @50 and @75
for iou in [0, 5]:
precision = precisions[iou, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
t.append(f'{round(ap, 3)}')
# indexes of area of small, median and large
for area in [1, 2, 3]:
precision = precisions[:, :, idx, area, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
t.append(f'{round(ap, 3)}')
results_per_category.append(tuple(t))
num_columns = len(results_per_category[0])
results_flatten = list(
itertools.chain(*results_per_category))
headers = [
'category', 'mAP', 'mAP_50', 'mAP_75', 'mAP_s',
'mAP_m', 'mAP_l'
]
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
logger.info('\n' + table.table)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = coco_eval.stats[coco_metric_names[metric_item]]
eval_results[key] = float(f'{round(val, 3)}')
ap = coco_eval.stats[:6]
logger.info(f'{metric}_mAP_copypaste: {ap[0]:.3f} '
f'{ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
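    # Config usage sketch (comments only). A typical evaluator entry in an
    # MMDetection config; the annotation path is a placeholder:
    #
    #   val_evaluator = dict(
    #       type='CocoMetric',
    #       ann_file='data/coco/annotations/instances_val2017.json',
    #       metric=['bbox', 'segm'],
    #       format_only=False,
    #       backend_args=None)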
| 25,325 | 41.852792 | 125 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/crowdhuman_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import json
import os.path as osp
import tempfile
from collections import OrderedDict
from multiprocessing import Process, Queue
from typing import Dict, List, Optional, Sequence, Union
import numpy as np
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump, get_text, load
from mmengine.logging import MMLogger
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_bipartite_matching
from mmdet.evaluation.functional.bbox_overlaps import bbox_overlaps
from mmdet.registry import METRICS
PERSON_CLASSES = ['background', 'person']
@METRICS.register_module()
class CrowdHumanMetric(BaseMetric):
"""CrowdHuman evaluation metric.
Evaluate Average Precision (AP), Miss Rate (MR) and Jaccard Index (JI)
for detection tasks.
Args:
ann_file (str): Path to the annotation file.
metric (str | List[str]): Metrics to be evaluated. Valid metrics
include 'AP', 'MR' and 'JI'. Defaults to 'AP'.
        format_only (bool): Format the output results without performing
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
Defaults to False.
outfile_prefix (str, optional): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Defaults to None.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
        eval_mode (int): Select the evaluation mode. Valid modes include
            0 (just body box), 1 (just head box) and 2 (both of them).
            Defaults to 0.
        iou_thres (float): IoU threshold. Defaults to 0.5.
        compare_matching_method (str, optional): Matching method used to
            compare the detection results with the ground truth when
            computing 'AP' and 'MR'. Valid methods include 'VOC' and
            None (CALTECH). Defaults to None.
        mr_ref (str): Different parameter selection to calculate MR. Valid
            refs include 'CALTECH_-2' and 'CALTECH_-4'.
            Defaults to 'CALTECH_-2'.
        num_ji_process (int): The number of processes used to evaluate JI.
            Defaults to 10.
"""
default_prefix: Optional[str] = 'crowd_human'
def __init__(self,
ann_file: str,
metric: Union[str, List[str]] = ['AP', 'MR', 'JI'],
format_only: bool = False,
outfile_prefix: Optional[str] = None,
file_client_args: dict = None,
backend_args: dict = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None,
eval_mode: int = 0,
iou_thres: float = 0.5,
compare_matching_method: Optional[str] = None,
mr_ref: str = 'CALTECH_-2',
num_ji_process: int = 10) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
self.ann_file = ann_file
# crowdhuman evaluation metrics
self.metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['MR', 'AP', 'JI']
for metric in self.metrics:
if metric not in allowed_metrics:
raise KeyError(f"metric should be one of 'MR', 'AP', 'JI',"
f'but got {metric}.')
self.format_only = format_only
if self.format_only:
            assert outfile_prefix is not None, \
                'outfile_prefix must not be None when format_only is True, ' \
                'otherwise the result files will be saved to a temp ' \
                'directory which will be cleaned up at the end.'
self.outfile_prefix = outfile_prefix
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead, please refer to'
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
assert eval_mode in [0, 1, 2], \
"Unknown eval mode. mr_ref should be one of '0', '1', '2'."
assert compare_matching_method is None or \
compare_matching_method == 'VOC', \
'The alternative compare_matching_method is VOC.' \
'This parameter defaults to CALTECH(None)'
assert mr_ref == 'CALTECH_-2' or mr_ref == 'CALTECH_-4', \
"mr_ref should be one of 'CALTECH_-2', 'CALTECH_-4'."
self.eval_mode = eval_mode
self.iou_thres = iou_thres
self.compare_matching_method = compare_matching_method
self.mr_ref = mr_ref
self.num_ji_process = num_ji_process
@staticmethod
def results2json(results: Sequence[dict], outfile_prefix: str) -> str:
"""Dump the detection results to a json file."""
result_file_path = f'{outfile_prefix}.json'
bbox_json_results = []
for i, result in enumerate(results):
ann, pred = result
dump_dict = dict()
dump_dict['ID'] = ann['ID']
dump_dict['width'] = ann['width']
dump_dict['height'] = ann['height']
dtboxes = []
bboxes = pred.tolist()
for _, single_bbox in enumerate(bboxes):
temp_dict = dict()
x1, y1, x2, y2, score = single_bbox
temp_dict['box'] = [x1, y1, x2 - x1, y2 - y1]
temp_dict['score'] = score
temp_dict['tag'] = 1
dtboxes.append(temp_dict)
dump_dict['dtboxes'] = dtboxes
bbox_json_results.append(dump_dict)
dump(bbox_json_results, result_file_path)
return result_file_path
def process(self, data_batch: Sequence[dict],
data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
ann = dict()
ann['ID'] = data_sample['img_id']
ann['width'] = data_sample['ori_shape'][1]
ann['height'] = data_sample['ori_shape'][0]
pred_bboxes = data_sample['pred_instances']['bboxes'].cpu().numpy()
pred_scores = data_sample['pred_instances']['scores'].cpu().numpy()
pred_bbox_scores = np.hstack(
[pred_bboxes, pred_scores.reshape((-1, 1))])
self.results.append((ann, pred_bbox_scores))
def compute_metrics(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
eval_results(Dict[str, float]): The computed metrics.
The keys are the names of the metrics, and the values
are corresponding results.
"""
logger: MMLogger = MMLogger.get_current_instance()
tmp_dir = None
if self.outfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
outfile_prefix = osp.join(tmp_dir.name, 'result')
else:
outfile_prefix = self.outfile_prefix
# convert predictions to coco format and dump to json file
result_file = self.results2json(results, outfile_prefix)
eval_results = OrderedDict()
if self.format_only:
logger.info(f'results are saved in {osp.dirname(outfile_prefix)}')
return eval_results
# load evaluation samples
eval_samples = self.load_eval_samples(result_file)
if 'AP' in self.metrics or 'MR' in self.metrics:
score_list = self.compare(eval_samples)
gt_num = sum([eval_samples[i].gt_num for i in eval_samples])
ign_num = sum([eval_samples[i].ign_num for i in eval_samples])
gt_num = gt_num - ign_num
img_num = len(eval_samples)
for metric in self.metrics:
logger.info(f'Evaluating {metric}...')
if metric == 'AP':
AP = self.eval_ap(score_list, gt_num, img_num)
eval_results['mAP'] = float(f'{round(AP, 4)}')
if metric == 'MR':
MR = self.eval_mr(score_list, gt_num, img_num)
eval_results['mMR'] = float(f'{round(MR, 4)}')
if metric == 'JI':
JI = self.eval_ji(eval_samples)
eval_results['JI'] = float(f'{round(JI, 4)}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
def load_eval_samples(self, result_file):
"""Load data from annotations file and detection results.
Args:
result_file (str): The file path of the saved detection results.
Returns:
            Dict[str, Image]: The detection results packaged by Image,
                keyed by image ID.
"""
gt_str = get_text(
self.ann_file, backend_args=self.backend_args).strip().split('\n')
gt_records = [json.loads(line) for line in gt_str]
pred_records = load(result_file, backend_args=self.backend_args)
eval_samples = dict()
for gt_record, pred_record in zip(gt_records, pred_records):
assert gt_record['ID'] == pred_record['ID'], \
'please set val_dataloader.sampler.shuffle=False and try again'
eval_samples[pred_record['ID']] = Image(self.eval_mode)
eval_samples[pred_record['ID']].load(gt_record, 'box', None,
PERSON_CLASSES, True)
eval_samples[pred_record['ID']].load(pred_record, 'box', None,
PERSON_CLASSES, False)
eval_samples[pred_record['ID']].clip_all_boader()
return eval_samples
def compare(self, samples):
"""Match the detection results with the ground_truth.
Args:
samples (dict[Image]): The detection result packaged by Image.
Returns:
            score_list(list[tuple[ndarray, int, str]]): Matching result.
                A list of tuples (dtbox, label, imgID), sorted by dtbox
                score in descending order.
"""
score_list = list()
for id in samples:
if self.compare_matching_method == 'VOC':
result = samples[id].compare_voc(self.iou_thres)
else:
result = samples[id].compare_caltech(self.iou_thres)
score_list.extend(result)
        # Sort by dtbox score in descending order.
score_list.sort(key=lambda x: x[0][-1], reverse=True)
return score_list
@staticmethod
def eval_ap(score_list, gt_num, img_num):
"""Evaluate by average precision.
Args:
            score_list(list[tuple[ndarray, int, str]]): Matching result.
                A list of tuples (dtbox, label, imgID), sorted by dtbox
                score in descending order.
gt_num(int): The number of gt boxes in the entire dataset.
img_num(int): The number of images in the entire dataset.
Returns:
ap(float): result of average precision.
"""
# calculate general ap score
def _calculate_map(_recall, _precision):
assert len(_recall) == len(_precision)
area = 0
for k in range(1, len(_recall)):
delta_h = (_precision[k - 1] + _precision[k]) / 2
delta_w = _recall[k] - _recall[k - 1]
area += delta_w * delta_h
return area
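        # _calculate_map integrates the precision-recall curve with the
        # trapezoidal rule:
        #   AP = sum_k (P[k-1] + P[k]) / 2 * (R[k] - R[k-1])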
tp, fp = 0.0, 0.0
rpX, rpY = list(), list()
fpn = []
recalln = []
thr = []
fppi = []
for i, item in enumerate(score_list):
if item[1] == 1:
tp += 1.0
elif item[1] == 0:
fp += 1.0
fn = gt_num - tp
recall = tp / (tp + fn)
precision = tp / (tp + fp)
rpX.append(recall)
rpY.append(precision)
fpn.append(fp)
recalln.append(tp)
thr.append(item[0][-1])
fppi.append(fp / img_num)
ap = _calculate_map(rpX, rpY)
return ap
def eval_mr(self, score_list, gt_num, img_num):
"""Evaluate by Caltech-style log-average miss rate.
Args:
            score_list(list[tuple[ndarray, int, str]]): Matching result.
                A list of tuples (dtbox, label, imgID), sorted by dtbox
                score in descending order.
            gt_num(int): The number of gt boxes in the entire dataset.
            img_num(int): The number of images in the entire dataset.
Returns:
mr(float): result of miss rate.
"""
        # find the first index whose value is >= target
def _find_gt(lst, target):
for idx, _item in enumerate(lst):
if _item >= target:
return idx
return len(lst) - 1
if self.mr_ref == 'CALTECH_-2':
# CALTECH_MRREF_2: anchor points (from 10^-2 to 1) as in
# P.Dollar's paper
ref = [
0.0100, 0.0178, 0.03160, 0.0562, 0.1000, 0.1778, 0.3162,
0.5623, 1.000
]
else:
# CALTECH_MRREF_4: anchor points (from 10^-4 to 1) as in
# S.Zhang's paper
ref = [
0.0001, 0.0003, 0.00100, 0.0032, 0.0100, 0.0316, 0.1000,
0.3162, 1.000
]
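        # The log-average miss rate is the geometric mean of the miss rates
        # sampled at the reference FPPI points above:
        #   MR = exp(mean_i(log(miss_rate at the first fppi >= ref[i])))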
tp, fp = 0.0, 0.0
fppiX, fppiY = list(), list()
for i, item in enumerate(score_list):
if item[1] == 1:
tp += 1.0
elif item[1] == 0:
fp += 1.0
fn = gt_num - tp
recall = tp / (tp + fn)
missrate = 1.0 - recall
fppi = fp / img_num
fppiX.append(fppi)
fppiY.append(missrate)
score = list()
for pos in ref:
argmin = _find_gt(fppiX, pos)
if argmin >= 0:
score.append(fppiY[argmin])
score = np.array(score)
mr = np.exp(np.log(score).mean())
return mr
def eval_ji(self, samples):
"""Evaluate by JI using multi_process.
Args:
samples(Dict[str, Image]): The detection result packaged by Image.
Returns:
ji(float): result of jaccard index.
"""
import math
res_line = []
res_ji = []
for i in range(10):
score_thr = 1e-1 * i
total = len(samples)
stride = math.ceil(total / self.num_ji_process)
result_queue = Queue(10000)
results, procs = [], []
records = list(samples.items())
for i in range(self.num_ji_process):
start = i * stride
end = np.min([start + stride, total])
sample_data = dict(records[start:end])
p = Process(
target=self.compute_ji_with_ignore,
args=(result_queue, sample_data, score_thr))
p.start()
procs.append(p)
for i in range(total):
t = result_queue.get()
results.append(t)
for p in procs:
p.join()
line, mean_ratio = self.gather(results)
line = 'score_thr:{:.1f}, {}'.format(score_thr, line)
res_line.append(line)
res_ji.append(mean_ratio)
return max(res_ji)
def compute_ji_with_ignore(self, result_queue, dt_result, score_thr):
"""Compute JI with ignore.
Args:
            result_queue(Queue): The queue used to collect per-image results
                when computing with multiple processes.
            dt_result(dict[Image]): Detection result packaged by Image.
            score_thr(float): The threshold of detection score.
        Returns:
            None. The per-image result dict is put into ``result_queue``.
"""
for ID, record in dt_result.items():
gt_boxes = record.gt_boxes
dt_boxes = record.dt_boxes
keep = dt_boxes[:, -1] > score_thr
dt_boxes = dt_boxes[keep][:, :-1]
gt_tag = np.array(gt_boxes[:, -1] != -1)
matches = self.compute_ji_matching(dt_boxes, gt_boxes[gt_tag, :4])
# get the unmatched_indices
matched_indices = np.array([j for (j, _) in matches])
unmatched_indices = list(
set(np.arange(dt_boxes.shape[0])) - set(matched_indices))
num_ignore_dt = self.get_ignores(dt_boxes[unmatched_indices],
gt_boxes[~gt_tag, :4])
matched_indices = np.array([j for (_, j) in matches])
unmatched_indices = list(
set(np.arange(gt_boxes[gt_tag].shape[0])) -
set(matched_indices))
num_ignore_gt = self.get_ignores(
gt_boxes[gt_tag][unmatched_indices], gt_boxes[~gt_tag, :4])
# compute results
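            # Per-image Jaccard index: with k matched pairs, m valid gts and
            # n valid dts (ignored boxes excluded), JI = k / (m + n - k).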
eps = 1e-6
k = len(matches)
m = gt_tag.sum() - num_ignore_gt
n = dt_boxes.shape[0] - num_ignore_dt
ratio = k / (m + n - k + eps)
recall = k / (m + eps)
cover = k / (n + eps)
noise = 1 - cover
result_dict = dict(
ratio=ratio,
recall=recall,
cover=cover,
noise=noise,
k=k,
m=m,
n=n)
result_queue.put_nowait(result_dict)
@staticmethod
def gather(results):
"""Integrate test results."""
assert len(results)
img_num = 0
for result in results:
if result['n'] != 0 or result['m'] != 0:
img_num += 1
mean_ratio = np.sum([rb['ratio'] for rb in results]) / img_num
valids = np.sum([rb['k'] for rb in results])
total = np.sum([rb['n'] for rb in results])
gtn = np.sum([rb['m'] for rb in results])
line = 'mean_ratio:{:.4f}, valids:{}, total:{}, gtn:{}'\
.format(mean_ratio, valids, total, gtn)
return line, mean_ratio
def compute_ji_matching(self, dt_boxes, gt_boxes):
"""Match the annotation box for each detection box.
Args:
dt_boxes(ndarray): Detection boxes.
gt_boxes(ndarray): Ground_truth boxes.
Returns:
matches_(list[tuple[int, int]]): Match result.
"""
assert dt_boxes.shape[-1] > 3 and gt_boxes.shape[-1] > 3
if dt_boxes.shape[0] < 1 or gt_boxes.shape[0] < 1:
return list()
ious = bbox_overlaps(dt_boxes, gt_boxes, mode='iou')
input_ = copy.deepcopy(ious)
input_[input_ < self.iou_thres] = 0
match_scipy = maximum_bipartite_matching(
csr_matrix(input_), perm_type='column')
matches_ = []
for i in range(len(match_scipy)):
if match_scipy[i] != -1:
matches_.append((i, int(match_scipy[i])))
return matches_
def get_ignores(self, dt_boxes, gt_boxes):
"""Get the number of ignore bboxes."""
if gt_boxes.size:
ioas = bbox_overlaps(dt_boxes, gt_boxes, mode='iof')
ioas = np.max(ioas, axis=1)
rows = np.where(ioas > self.iou_thres)[0]
return len(rows)
else:
return 0
class Image(object):
"""Data structure for evaluation of CrowdHuman.
Note:
This implementation is modified from https://github.com/Purkialo/
CrowdDet/blob/master/lib/evaluate/APMRToolkits/image.py
Args:
        mode (int): Select the evaluation mode. Valid modes include
            0 (body box only), 1 (head box only) and 2 (both of them).
            Defaults to 0.
"""
def __init__(self, mode):
self.ID = None
self.width = None
self.height = None
self.dt_boxes = None
self.gt_boxes = None
self.eval_mode = mode
self.ign_num = None
self.gt_num = None
self.dt_num = None
def load(self, record, body_key, head_key, class_names, gt_flag):
"""Loading information for evaluation.
Args:
record (dict): Label information or test results.
The format might look something like this:
{
'ID': '273271,c9db000d5146c15',
'gtboxes': [
{'fbox': [72, 202, 163, 503], 'tag': 'person', ...},
{'fbox': [199, 180, 144, 499], 'tag': 'person', ...},
...
]
}
or:
{
'ID': '273271,c9db000d5146c15',
'width': 800,
'height': 1067,
'dtboxes': [
{
'box': [306.22, 205.95, 164.05, 394.04],
'score': 0.99,
'tag': 1
},
{
'box': [403.60, 178.66, 157.15, 421.33],
'score': 0.99,
'tag': 1
},
...
]
}
body_key (str, None): key of detection body box.
Valid when loading detection results and self.eval_mode!=1.
head_key (str, None): key of detection head box.
Valid when loading detection results and self.eval_mode!=0.
            class_names (list[str]): Class names of the dataset.
                Defaults to ['background', 'person'].
            gt_flag (bool): Whether the record is a ground truth
                annotation (True) or a detection result (False).
"""
if 'ID' in record and self.ID is None:
self.ID = record['ID']
if 'width' in record and self.width is None:
self.width = record['width']
if 'height' in record and self.height is None:
self.height = record['height']
if gt_flag:
self.gt_num = len(record['gtboxes'])
body_bbox, head_bbox = self.load_gt_boxes(record, 'gtboxes',
class_names)
if self.eval_mode == 0:
self.gt_boxes = body_bbox
self.ign_num = (body_bbox[:, -1] == -1).sum()
elif self.eval_mode == 1:
self.gt_boxes = head_bbox
self.ign_num = (head_bbox[:, -1] == -1).sum()
else:
gt_tag = np.array([
body_bbox[i, -1] != -1 and head_bbox[i, -1] != -1
for i in range(len(body_bbox))
])
self.ign_num = (gt_tag == 0).sum()
self.gt_boxes = np.hstack(
(body_bbox[:, :-1], head_bbox[:, :-1],
gt_tag.reshape(-1, 1)))
if not gt_flag:
self.dt_num = len(record['dtboxes'])
if self.eval_mode == 0:
self.dt_boxes = self.load_det_boxes(record, 'dtboxes',
body_key, 'score')
elif self.eval_mode == 1:
self.dt_boxes = self.load_det_boxes(record, 'dtboxes',
head_key, 'score')
else:
body_dtboxes = self.load_det_boxes(record, 'dtboxes', body_key,
'score')
head_dtboxes = self.load_det_boxes(record, 'dtboxes', head_key,
'score')
self.dt_boxes = np.hstack((body_dtboxes, head_dtboxes))
@staticmethod
def load_gt_boxes(dict_input, key_name, class_names):
"""load ground_truth and transform [x, y, w, h] to [x1, y1, x2, y2]"""
assert key_name in dict_input
if len(dict_input[key_name]) < 1:
            return np.empty([0, 5]), np.empty([0, 5])
head_bbox = []
body_bbox = []
for rb in dict_input[key_name]:
if rb['tag'] in class_names:
body_tag = class_names.index(rb['tag'])
head_tag = copy.deepcopy(body_tag)
else:
body_tag = -1
head_tag = -1
if 'extra' in rb:
if 'ignore' in rb['extra']:
if rb['extra']['ignore'] != 0:
body_tag = -1
head_tag = -1
if 'head_attr' in rb:
if 'ignore' in rb['head_attr']:
if rb['head_attr']['ignore'] != 0:
head_tag = -1
head_bbox.append(np.hstack((rb['hbox'], head_tag)))
body_bbox.append(np.hstack((rb['fbox'], body_tag)))
head_bbox = np.array(head_bbox)
head_bbox[:, 2:4] += head_bbox[:, :2]
body_bbox = np.array(body_bbox)
body_bbox[:, 2:4] += body_bbox[:, :2]
return body_bbox, head_bbox
@staticmethod
def load_det_boxes(dict_input, key_name, key_box, key_score, key_tag=None):
"""load detection boxes."""
assert key_name in dict_input
if len(dict_input[key_name]) < 1:
return np.empty([0, 5])
else:
assert key_box in dict_input[key_name][0]
if key_score:
assert key_score in dict_input[key_name][0]
if key_tag:
assert key_tag in dict_input[key_name][0]
if key_score:
if key_tag:
bboxes = np.vstack([
np.hstack((rb[key_box], rb[key_score], rb[key_tag]))
for rb in dict_input[key_name]
])
else:
bboxes = np.vstack([
np.hstack((rb[key_box], rb[key_score]))
for rb in dict_input[key_name]
])
else:
if key_tag:
bboxes = np.vstack([
np.hstack((rb[key_box], rb[key_tag]))
for rb in dict_input[key_name]
])
else:
bboxes = np.vstack(
[rb[key_box] for rb in dict_input[key_name]])
bboxes[:, 2:4] += bboxes[:, :2]
return bboxes
def clip_all_boader(self):
"""Make sure boxes are within the image range."""
def _clip_boundary(boxes, height, width):
assert boxes.shape[-1] >= 4
boxes[:, 0] = np.minimum(np.maximum(boxes[:, 0], 0), width - 1)
boxes[:, 1] = np.minimum(np.maximum(boxes[:, 1], 0), height - 1)
boxes[:, 2] = np.maximum(np.minimum(boxes[:, 2], width), 0)
boxes[:, 3] = np.maximum(np.minimum(boxes[:, 3], height), 0)
return boxes
assert self.dt_boxes.shape[-1] >= 4
assert self.gt_boxes.shape[-1] >= 4
assert self.width is not None and self.height is not None
if self.eval_mode == 2:
self.dt_boxes[:, :4] = _clip_boundary(self.dt_boxes[:, :4],
self.height, self.width)
self.gt_boxes[:, :4] = _clip_boundary(self.gt_boxes[:, :4],
self.height, self.width)
self.dt_boxes[:, 4:8] = _clip_boundary(self.dt_boxes[:, 4:8],
self.height, self.width)
self.gt_boxes[:, 4:8] = _clip_boundary(self.gt_boxes[:, 4:8],
self.height, self.width)
else:
self.dt_boxes = _clip_boundary(self.dt_boxes, self.height,
self.width)
self.gt_boxes = _clip_boundary(self.gt_boxes, self.height,
self.width)
def compare_voc(self, thres):
"""Match the detection results with the ground_truth by VOC.
Args:
thres (float): IOU threshold.
Returns:
            score_list(list[tuple[ndarray, int, str]]): Matching result.
                A list of tuples (dtbox, label, imgID), sorted by dtbox
                score in descending order.
        """
        if self.dt_boxes is None or len(self.dt_boxes) == 0:
            return list()
        # sort detections by score (descending) and put ignored gt boxes
        # (tag <= 0) after the valid ones
        dtboxes = np.array(
            sorted(self.dt_boxes, key=lambda x: x[-1], reverse=True))
        if self.gt_boxes is not None and len(self.gt_boxes) > 0:
            gtboxes = np.array(
                sorted(self.gt_boxes, key=lambda x: x[-1], reverse=True))
            overlap_iou = bbox_overlaps(dtboxes, gtboxes, mode='iou')
        else:
            gtboxes = np.empty((0, 5))
            overlap_iou = np.zeros((len(dtboxes), 0))
        gt_matched = np.zeros(gtboxes.shape[0])
        score_list = list()
        for i, dt in enumerate(dtboxes):
            maxpos = -1
            maxiou = thres
            for j in range(gtboxes.shape[0]):
                if gt_matched[j] == 1:
                    continue
                overlap = overlap_iou[i][j]
                if overlap > maxiou:
                    maxiou = overlap
                    maxpos = j
            if maxpos >= 0:
                if gtboxes[maxpos, -1] > 0:
                    # matched to a valid gt box -> true positive
                    gt_matched[maxpos] = 1
                    score_list.append((dt, 1, self.ID))
                # detections matched to ignored gt boxes are dropped
            else:
                # unmatched detection -> false positive
                score_list.append((dt, 0, self.ID))
        return score_list
def compare_caltech(self, thres):
"""Match the detection results with the ground_truth by Caltech
matching strategy.
Args:
thres (float): IOU threshold.
Returns:
            score_list(list[tuple[ndarray, int, str]]): Matching result.
                A list of tuples (dtbox, label, imgID), sorted by dtbox
                score in descending order.
"""
if self.dt_boxes is None or self.gt_boxes is None:
return list()
dtboxes = self.dt_boxes if self.dt_boxes is not None else list()
gtboxes = self.gt_boxes if self.gt_boxes is not None else list()
dt_matched = np.zeros(dtboxes.shape[0])
gt_matched = np.zeros(gtboxes.shape[0])
dtboxes = np.array(sorted(dtboxes, key=lambda x: x[-1], reverse=True))
gtboxes = np.array(sorted(gtboxes, key=lambda x: x[-1], reverse=True))
if len(dtboxes):
overlap_iou = bbox_overlaps(dtboxes, gtboxes, mode='iou')
overlap_ioa = bbox_overlaps(dtboxes, gtboxes, mode='iof')
else:
return list()
score_list = list()
for i, dt in enumerate(dtboxes):
maxpos = -1
maxiou = thres
for j, gt in enumerate(gtboxes):
if gt_matched[j] == 1:
continue
if gt[-1] > 0:
overlap = overlap_iou[i][j]
if overlap > maxiou:
maxiou = overlap
maxpos = j
else:
if maxpos >= 0:
break
else:
overlap = overlap_ioa[i][j]
if overlap > thres:
maxiou = overlap
maxpos = j
if maxpos >= 0:
if gtboxes[maxpos, -1] > 0:
gt_matched[maxpos] = 1
dt_matched[i] = 1
score_list.append((dt, 1, self.ID))
else:
dt_matched[i] = -1
else:
dt_matched[i] = 0
score_list.append((dt, 0, self.ID))
return score_list
| 32,675 | 38.607273 | 125 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/cityscapes_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import shutil
import tempfile
from collections import OrderedDict
from typing import Dict, Optional, Sequence
import mmcv
import numpy as np
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger
from mmdet.registry import METRICS
try:
import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa: E501
import cityscapesscripts.helpers.labels as CSLabels
from mmdet.evaluation.functional import evaluateImgLists
HAS_CITYSCAPESAPI = True
except ImportError:
HAS_CITYSCAPESAPI = False
@METRICS.register_module()
class CityScapesMetric(BaseMetric):
"""CityScapes metric for instance segmentation.
Args:
        outfile_prefix (str): The prefix of txt and png files. The txt and
            png files will be saved in a directory whose path is
            "outfile_prefix.results/".
        seg_prefix (str, optional): Path to the directory which contains the
            cityscapes instance segmentation masks. It is necessary for
            training and validation. It can be None when running inference
            on the test dataset. Defaults to None.
        format_only (bool): Format the output results without performing
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
Defaults to False.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
dump_matches (bool): Whether dump matches.json file during evaluating.
Defaults to False.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
default_prefix: Optional[str] = 'cityscapes'
def __init__(self,
outfile_prefix: str,
seg_prefix: Optional[str] = None,
format_only: bool = False,
collect_device: str = 'cpu',
prefix: Optional[str] = None,
dump_matches: bool = False,
file_client_args: dict = None,
backend_args: dict = None) -> None:
if not HAS_CITYSCAPESAPI:
raise RuntimeError('Failed to import `cityscapesscripts`.'
'Please try to install official '
'cityscapesscripts by '
'"pip install cityscapesscripts"')
super().__init__(collect_device=collect_device, prefix=prefix)
self.tmp_dir = None
self.format_only = format_only
if self.format_only:
            assert outfile_prefix is not None, \
                'outfile_prefix must be not None when format_only is True, ' \
                'otherwise the result files will be saved to a temp ' \
                'directory which will be cleaned up at the end.'
        else:
            assert seg_prefix is not None, \
                '`seg_prefix` is necessary when computing the CityScapes ' \
                'metrics'
if outfile_prefix is None:
self.tmp_dir = tempfile.TemporaryDirectory()
self.outfile_prefix = osp.join(self.tmp_dir.name, 'results')
else:
# the directory to save predicted panoptic segmentation mask
self.outfile_prefix = osp.join(outfile_prefix, 'results') # type: ignore # yapf: disable # noqa: E501
dir_name = osp.expanduser(self.outfile_prefix)
if osp.exists(dir_name) and is_main_process():
logger: MMLogger = MMLogger.get_current_instance()
logger.info('remove previous results.')
shutil.rmtree(dir_name)
os.makedirs(dir_name, exist_ok=True)
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead, please refer to'
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
self.seg_prefix = seg_prefix
self.dump_matches = dump_matches
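    # Typical usage in a config (sketch; both paths below are placeholders):
    #   val_evaluator = dict(
    #       type='CityScapesMetric',
    #       outfile_prefix='./work_dirs/cityscapes_metric/instance',
    #       seg_prefix='data/cityscapes/gtFine/val')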
def __del__(self) -> None:
"""Clean up the results if necessary."""
if self.tmp_dir is not None:
self.tmp_dir.cleanup()
# TODO: data_batch is no longer needed, consider adjusting the
# parameter position
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
# parse pred
result = dict()
pred = data_sample['pred_instances']
filename = data_sample['img_path']
basename = osp.splitext(osp.basename(filename))[0]
pred_txt = osp.join(self.outfile_prefix, basename + '_pred.txt')
result['pred_txt'] = pred_txt
labels = pred['labels'].cpu().numpy()
masks = pred['masks'].cpu().numpy().astype(np.uint8)
if 'mask_scores' in pred:
# some detectors use different scores for bbox and mask
mask_scores = pred['mask_scores'].cpu().numpy()
else:
mask_scores = pred['scores'].cpu().numpy()
with open(pred_txt, 'w') as f:
for i, (label, mask, mask_score) in enumerate(
zip(labels, masks, mask_scores)):
class_name = self.dataset_meta['classes'][label]
class_id = CSLabels.name2label[class_name].id
png_filename = osp.join(
self.outfile_prefix,
basename + f'_{i}_{class_name}.png')
mmcv.imwrite(mask, png_filename)
f.write(f'{osp.basename(png_filename)} '
f'{class_id} {mask_score}\n')
# parse gt
gt = dict()
img_path = filename.replace('leftImg8bit.png',
'gtFine_instanceIds.png')
gt['file_name'] = img_path.replace('leftImg8bit', 'gtFine')
self.results.append((gt, result))
def compute_metrics(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
Dict[str, float]: The computed metrics. The keys are the names of
the metrics, and the values are corresponding results.
"""
logger: MMLogger = MMLogger.get_current_instance()
if self.format_only:
logger.info(
f'results are saved to {osp.dirname(self.outfile_prefix)}')
return OrderedDict()
logger.info('starts to compute metric')
        # split gt and prediction list
        gts, preds = zip(*results)
        # set global states in cityscapes evaluation API
        gt_instances_file = osp.join(self.outfile_prefix, 'gtInstances.json')  # type: ignore # yapf: disable # noqa: E501
CSEval.args.JSONOutput = False
CSEval.args.colorized = False
CSEval.args.gtInstancesFile = gt_instances_file
groundTruthImgList = [gt['file_name'] for gt in gts]
predictionImgList = [pred['pred_txt'] for pred in preds]
CSEval_results = evaluateImgLists(
predictionImgList,
groundTruthImgList,
CSEval.args,
self.backend_args,
dump_matches=self.dump_matches)['averages']
eval_results = OrderedDict()
eval_results['mAP'] = CSEval_results['allAp']
eval_results['AP@50'] = CSEval_results['allAp50%']
return eval_results
| 8,769 | 41.572816 | 125 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults'
]
| 722 | 39.166667 | 79 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/openimages_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from collections import OrderedDict
from typing import List, Optional, Sequence, Union
import numpy as np
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger, print_log
from mmdet.registry import METRICS
from ..functional import eval_map
@METRICS.register_module()
class OpenImagesMetric(BaseMetric):
"""OpenImages evaluation metric.
Evaluate detection mAP for OpenImages. Please refer to
https://storage.googleapis.com/openimages/web/evaluation.html for more
details.
Args:
iou_thrs (float or List[float]): IoU threshold. Defaults to 0.5.
ioa_thrs (float or List[float]): IoA threshold. Defaults to 0.5.
scale_ranges (List[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Defaults to None
        use_group_of (bool): Whether to consider group-of ground truth
            bboxes during evaluation. Defaults to True.
        get_supercategory (bool): Whether to get the parent class of the
            current class. Defaults to True.
        filter_labels (bool): Whether to filter out unannotated classes.
            Defaults to True.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'openimages'
def __init__(self,
iou_thrs: Union[float, List[float]] = 0.5,
ioa_thrs: Union[float, List[float]] = 0.5,
scale_ranges: Optional[List[tuple]] = None,
use_group_of: bool = True,
get_supercategory: bool = True,
filter_labels: bool = True,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
self.iou_thrs = [iou_thrs] if isinstance(iou_thrs, float) else iou_thrs
self.ioa_thrs = [ioa_thrs] if (isinstance(ioa_thrs, float)
or ioa_thrs is None) else ioa_thrs
assert isinstance(self.iou_thrs, list) and isinstance(
self.ioa_thrs, list)
assert len(self.iou_thrs) == len(self.ioa_thrs)
self.scale_ranges = scale_ranges
self.use_group_of = use_group_of
self.get_supercategory = get_supercategory
self.filter_labels = filter_labels
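    # Typical usage in a config (sketch; the thresholds shown are the
    # defaults of this metric):
    #   val_evaluator = dict(type='OpenImagesMetric', iou_thrs=0.5,
    #                        ioa_thrs=0.5)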
def _get_supercategory_ann(self, instances: List[dict]) -> List[dict]:
"""Get parent classes's annotation of the corresponding class.
Args:
instances (List[dict]): A list of annotations of the instances.
Returns:
List[dict]: Annotations extended with super-category.
"""
supercat_instances = []
relation_matrix = self.dataset_meta['RELATION_MATRIX']
for instance in instances:
labels = np.where(relation_matrix[instance['bbox_label']])[0]
for label in labels:
if label == instance['bbox_label']:
continue
new_instance = copy.deepcopy(instance)
new_instance['bbox_label'] = label
supercat_instances.append(new_instance)
return supercat_instances
def _process_predictions(self, pred_bboxes: np.ndarray,
pred_scores: np.ndarray, pred_labels: np.ndarray,
gt_instances: list,
image_level_labels: np.ndarray) -> tuple:
"""Process results of the corresponding class of the detection bboxes.
        Note: Depending on the parameters, this method optionally applies two
        kinds of processing:
        1. Add the parent classes of the corresponding class of the
        detection bboxes.
        2. Ignore the classes that are unannotated on that image.
Args:
pred_bboxes (np.ndarray): bboxes predicted by the model
pred_scores (np.ndarray): scores predicted by the model
pred_labels (np.ndarray): labels predicted by the model
gt_instances (list): ground truth annotations
image_level_labels (np.ndarray): human-verified image level labels
Returns:
tuple: Processed bboxes, scores, and labels.
"""
processed_bboxes = copy.deepcopy(pred_bboxes)
processed_scores = copy.deepcopy(pred_scores)
processed_labels = copy.deepcopy(pred_labels)
gt_labels = np.array([ins['bbox_label'] for ins in gt_instances],
dtype=np.int64)
if image_level_labels is not None:
allowed_classes = np.unique(
np.append(gt_labels, image_level_labels))
else:
allowed_classes = np.unique(gt_labels)
relation_matrix = self.dataset_meta['RELATION_MATRIX']
pred_classes = np.unique(pred_labels)
for pred_class in pred_classes:
classes = np.where(relation_matrix[pred_class])[0]
for cls in classes:
if (cls in allowed_classes and cls != pred_class
and self.get_supercategory):
# add super-supercategory preds
index = np.where(pred_labels == pred_class)[0]
processed_scores = np.concatenate(
[processed_scores, pred_scores[index]])
processed_bboxes = np.concatenate(
[processed_bboxes, pred_bboxes[index]])
extend_labels = np.full(index.shape, cls, dtype=np.int64)
processed_labels = np.concatenate(
[processed_labels, extend_labels])
elif cls not in allowed_classes and self.filter_labels:
# remove unannotated preds
index = np.where(processed_labels != cls)[0]
processed_scores = processed_scores[index]
processed_bboxes = processed_bboxes[index]
processed_labels = processed_labels[index]
return processed_bboxes, processed_scores, processed_labels
# TODO: data_batch is no longer needed, consider adjusting the
# parameter position
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
gt = copy.deepcopy(data_sample)
# add super-category instances
# TODO: Need to refactor to support LoadAnnotations
instances = gt['instances']
if self.get_supercategory:
supercat_instances = self._get_supercategory_ann(instances)
instances.extend(supercat_instances)
gt_labels = []
gt_bboxes = []
is_group_ofs = []
for ins in instances:
gt_labels.append(ins['bbox_label'])
gt_bboxes.append(ins['bbox'])
is_group_ofs.append(ins['is_group_of'])
ann = dict(
labels=np.array(gt_labels, dtype=np.int64),
bboxes=np.array(gt_bboxes, dtype=np.float32).reshape((-1, 4)),
gt_is_group_ofs=np.array(is_group_ofs, dtype=bool))
image_level_labels = gt.get('image_level_labels', None)
pred = data_sample['pred_instances']
pred_bboxes = pred['bboxes'].cpu().numpy()
pred_scores = pred['scores'].cpu().numpy()
pred_labels = pred['labels'].cpu().numpy()
pred_bboxes, pred_scores, pred_labels = self._process_predictions(
pred_bboxes, pred_scores, pred_labels, instances,
image_level_labels)
dets = []
for label in range(len(self.dataset_meta['classes'])):
index = np.where(pred_labels == label)[0]
pred_bbox_scores = np.hstack(
[pred_bboxes[index], pred_scores[index].reshape((-1, 1))])
dets.append(pred_bbox_scores)
self.results.append((ann, dets))
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
logger = MMLogger.get_current_instance()
gts, preds = zip(*results)
eval_results = OrderedDict()
# get dataset type
dataset_type = self.dataset_meta.get('dataset_type')
if dataset_type not in ['oid_challenge', 'oid_v6']:
dataset_type = 'oid_v6'
print_log(
'Cannot infer dataset type from the length of the'
' classes. Set `oid_v6` as dataset type.',
logger='current')
mean_aps = []
for i, (iou_thr,
ioa_thr) in enumerate(zip(self.iou_thrs, self.ioa_thrs)):
if self.use_group_of:
assert ioa_thr is not None, 'ioa_thr must have value when' \
' using group_of in evaluation.'
print_log(f'\n{"-" * 15}iou_thr, ioa_thr: {iou_thr}, {ioa_thr}'
f'{"-" * 15}')
mean_ap, _ = eval_map(
preds,
gts,
scale_ranges=self.scale_ranges,
iou_thr=iou_thr,
ioa_thr=ioa_thr,
dataset=dataset_type,
logger=logger,
use_group_of=self.use_group_of)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
return eval_results
| 10,709 | 44 | 79 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/dump_det_results.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Sequence
from mmengine.evaluator import DumpResults
from mmengine.evaluator.metric import _to_cpu
from mmdet.registry import METRICS
from mmdet.structures.mask import encode_mask_results
@METRICS.register_module()
class DumpDetResults(DumpResults):
"""Dump model predictions to a pickle file for offline evaluation.
Different from `DumpResults` in MMEngine, it compresses instance
segmentation masks into RLE format.
Args:
out_file_path (str): Path of the dumped file. Must end with '.pkl'
or '.pickle'.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
"""
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""transfer tensors in predictions to CPU."""
data_samples = _to_cpu(data_samples)
for data_sample in data_samples:
# remove gt
data_sample.pop('gt_instances', None)
data_sample.pop('ignored_instances', None)
data_sample.pop('gt_panoptic_seg', None)
if 'pred_instances' in data_sample:
pred = data_sample['pred_instances']
# encode mask to RLE
if 'masks' in pred:
pred['masks'] = encode_mask_results(pred['masks'].numpy())
if 'pred_panoptic_seg' in data_sample:
warnings.warn(
'Panoptic segmentation map will not be compressed. '
'The dumped file will be extremely large! '
'Suggest using `CocoPanopticMetric` to save the coco '
'format json and segmentation png files directly.')
self.results.extend(data_samples)
| 1,888 | 38.354167 | 78 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/lvis_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
import os.path as osp
import tempfile
import warnings
from collections import OrderedDict
from typing import Dict, List, Optional, Sequence, Union
import numpy as np
from mmengine.fileio import get_local_path
from mmengine.logging import MMLogger
from terminaltables import AsciiTable
from mmdet.registry import METRICS
from mmdet.structures.mask import encode_mask_results
from ..functional import eval_recalls
from .coco_metric import CocoMetric
try:
import lvis
if getattr(lvis, '__version__', '0') >= '10.5.3':
warnings.warn(
'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501
UserWarning)
from lvis import LVIS, LVISEval, LVISResults
except ImportError:
lvis = None
LVISEval = None
LVISResults = None
@METRICS.register_module()
class LVISMetric(CocoMetric):
"""LVIS evaluation metric.
Args:
ann_file (str, optional): Path to the coco format annotation file.
If not specified, ground truth annotations from the dataset will
be converted to coco format. Defaults to None.
metric (str | List[str]): Metrics to be evaluated. Valid metrics
include 'bbox', 'segm', 'proposal', and 'proposal_fast'.
Defaults to 'bbox'.
classwise (bool): Whether to evaluate the metric class-wise.
Defaults to False.
proposal_nums (Sequence[int]): Numbers of proposals to be evaluated.
Defaults to (100, 300, 1000).
iou_thrs (float | List[float], optional): IoU threshold to compute AP
and AR. If not specified, IoUs from 0.5 to 0.95 will be used.
Defaults to None.
metric_items (List[str], optional): Metric result names to be
recorded in the evaluation result. Defaults to None.
        format_only (bool): Format the output results without performing
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
Defaults to False.
outfile_prefix (str, optional): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
default_prefix: Optional[str] = 'lvis'
def __init__(self,
ann_file: Optional[str] = None,
metric: Union[str, List[str]] = 'bbox',
classwise: bool = False,
proposal_nums: Sequence[int] = (100, 300, 1000),
iou_thrs: Optional[Union[float, Sequence[float]]] = None,
metric_items: Optional[Sequence[str]] = None,
format_only: bool = False,
outfile_prefix: Optional[str] = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None,
file_client_args: dict = None,
backend_args: dict = None) -> None:
if lvis is None:
raise RuntimeError(
'Package lvis is not installed. Please run "pip install '
'git+https://github.com/lvis-dataset/lvis-api.git".')
super().__init__(collect_device=collect_device, prefix=prefix)
# coco evaluation metrics
self.metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in self.metrics:
if metric not in allowed_metrics:
raise KeyError(
"metric should be one of 'bbox', 'segm', 'proposal', "
f"'proposal_fast', but got {metric}.")
# do class wise evaluation, default False
self.classwise = classwise
# proposal_nums used to compute recall or precision.
self.proposal_nums = list(proposal_nums)
# iou_thrs used to compute recall or precision.
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.iou_thrs = iou_thrs
self.metric_items = metric_items
self.format_only = format_only
if self.format_only:
            assert outfile_prefix is not None, \
                'outfile_prefix must be not None when format_only is True, ' \
                'otherwise the result files will be saved to a temp ' \
                'directory which will be cleaned up at the end.'
self.outfile_prefix = outfile_prefix
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead, please refer to'
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
# if ann_file is not specified,
# initialize lvis api with the converted dataset
if ann_file is not None:
with get_local_path(
ann_file, backend_args=self.backend_args) as local_path:
self._lvis_api = LVIS(local_path)
else:
self._lvis_api = None
# handle dataset lazy init
self.cat_ids = None
self.img_ids = None
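    # Typical usage in a config (sketch; the annotation path is a
    # placeholder):
    #   val_evaluator = dict(
    #       type='LVISMetric',
    #       ann_file='data/lvis_v1/annotations/lvis_v1_val.json',
    #       metric=['bbox', 'segm'])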
def fast_eval_recall(self,
results: List[dict],
proposal_nums: Sequence[int],
iou_thrs: Sequence[float],
logger: Optional[MMLogger] = None) -> np.ndarray:
"""Evaluate proposal recall with LVIS's fast_eval_recall.
Args:
results (List[dict]): Results of the dataset.
proposal_nums (Sequence[int]): Proposal numbers used for
evaluation.
iou_thrs (Sequence[float]): IoU thresholds used for evaluation.
logger (MMLogger, optional): Logger used for logging the recall
summary.
Returns:
np.ndarray: Averaged recall results.
"""
gt_bboxes = []
pred_bboxes = [result['bboxes'] for result in results]
for i in range(len(self.img_ids)):
ann_ids = self._lvis_api.get_ann_ids(img_ids=[self.img_ids[i]])
ann_info = self._lvis_api.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, pred_bboxes, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
# TODO: data_batch is no longer needed, consider adjusting the
# parameter position
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
result = dict()
pred = data_sample['pred_instances']
result['img_id'] = data_sample['img_id']
result['bboxes'] = pred['bboxes'].cpu().numpy()
result['scores'] = pred['scores'].cpu().numpy()
result['labels'] = pred['labels'].cpu().numpy()
# encode mask to RLE
if 'masks' in pred:
result['masks'] = encode_mask_results(
pred['masks'].detach().cpu().numpy())
# some detectors use different scores for bbox and mask
if 'mask_scores' in pred:
result['mask_scores'] = pred['mask_scores'].cpu().numpy()
# parse gt
gt = dict()
gt['width'] = data_sample['ori_shape'][1]
gt['height'] = data_sample['ori_shape'][0]
gt['img_id'] = data_sample['img_id']
if self._lvis_api is None:
# TODO: Need to refactor to support LoadAnnotations
assert 'instances' in data_sample, \
'ground truth is required for evaluation when ' \
'`ann_file` is not provided'
gt['anns'] = data_sample['instances']
# add converted result to the results list
self.results.append((gt, result))
def compute_metrics(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
Dict[str, float]: The computed metrics. The keys are the names of
the metrics, and the values are corresponding results.
"""
logger: MMLogger = MMLogger.get_current_instance()
# split gt and prediction list
gts, preds = zip(*results)
tmp_dir = None
if self.outfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
outfile_prefix = osp.join(tmp_dir.name, 'results')
else:
outfile_prefix = self.outfile_prefix
if self._lvis_api is None:
# use converted gt json file to initialize coco api
logger.info('Converting ground truth to coco format...')
coco_json_path = self.gt_to_coco_json(
gt_dicts=gts, outfile_prefix=outfile_prefix)
self._lvis_api = LVIS(coco_json_path)
# handle lazy init
if self.cat_ids is None:
self.cat_ids = self._lvis_api.get_cat_ids()
if self.img_ids is None:
self.img_ids = self._lvis_api.get_img_ids()
# convert predictions to coco format and dump to json file
result_files = self.results2json(preds, outfile_prefix)
eval_results = OrderedDict()
if self.format_only:
logger.info('results are saved in '
f'{osp.dirname(outfile_prefix)}')
return eval_results
lvis_gt = self._lvis_api
for metric in self.metrics:
logger.info(f'Evaluating {metric}...')
# TODO: May refactor fast_eval_recall to an independent metric?
# fast eval recall
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
preds, self.proposal_nums, self.iou_thrs, logger=logger)
log_msg = []
for i, num in enumerate(self.proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
logger.info(log_msg)
continue
try:
lvis_dt = LVISResults(lvis_gt, result_files[metric])
except IndexError:
logger.info(
'The testing results of the whole dataset is empty.')
break
iou_type = 'bbox' if metric == 'proposal' else metric
lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type)
lvis_eval.params.imgIds = self.img_ids
metric_items = self.metric_items
if metric == 'proposal':
lvis_eval.params.useCats = 0
lvis_eval.params.maxDets = list(self.proposal_nums)
lvis_eval.evaluate()
lvis_eval.accumulate()
lvis_eval.summarize()
if metric_items is None:
metric_items = ['AR@300', 'ARs@300', 'ARm@300', 'ARl@300']
for k, v in lvis_eval.get_results().items():
if k in metric_items:
val = float('{:.3f}'.format(float(v)))
eval_results[k] = val
else:
lvis_eval.evaluate()
lvis_eval.accumulate()
lvis_eval.summarize()
lvis_results = lvis_eval.get_results()
if self.classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = lvis_eval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
# the dimensions of precisions are
# [num_thrs, num_recalls, num_cats, num_area_rngs]
nm = self._lvis_api.load_cats([catId])[0]
precision = precisions[:, :, idx, 0]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
eval_results[f'{nm["name"]}_precision'] = round(ap, 3)
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
logger.info('\n' + table.table)
if metric_items is None:
metric_items = [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'APr',
'APc', 'APf'
]
for k, v in lvis_results.items():
if k in metric_items:
key = '{}_{}'.format(metric, k)
val = float('{:.3f}'.format(float(v)))
eval_results[key] = val
lvis_eval.print_results()
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
| 15,924 | 42.630137 | 149 |
py
|
ERD
|
ERD-main/mmdet/evaluation/metrics/voc_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from collections import OrderedDict
from typing import List, Optional, Sequence, Union
import numpy as np
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger
from mmdet.registry import METRICS
from ..functional import eval_map, eval_recalls
@METRICS.register_module()
class VOCMetric(BaseMetric):
"""Pascal VOC evaluation metric.
Args:
iou_thrs (float or List[float]): IoU threshold. Defaults to 0.5.
scale_ranges (List[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Defaults to None.
metric (str | list[str]): Metrics to be evaluated. Options are
            'mAP', 'recall'. If given as a list, only the first metric in
            the list will be used for evaluation.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
eval_mode (str): 'area' or '11points', 'area' means calculating the
area under precision-recall curve, '11points' means calculating
the average precision of recalls at [0, 0.1, ..., 1].
            PASCAL VOC2007 uses '11points' by default, while PASCAL
            VOC2012 uses 'area' by default.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'pascal_voc'
def __init__(self,
iou_thrs: Union[float, List[float]] = 0.5,
scale_ranges: Optional[List[tuple]] = None,
metric: Union[str, List[str]] = 'mAP',
proposal_nums: Sequence[int] = (100, 300, 1000),
eval_mode: str = '11points',
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
self.iou_thrs = [iou_thrs] if isinstance(iou_thrs, float) \
else iou_thrs
self.scale_ranges = scale_ranges
# voc evaluation metrics
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['recall', 'mAP']
if metric not in allowed_metrics:
raise KeyError(
f"metric should be one of 'recall', 'mAP', but got {metric}.")
self.metric = metric
self.proposal_nums = proposal_nums
assert eval_mode in ['area', '11points'], \
'Unrecognized mode, only "area" and "11points" are supported'
self.eval_mode = eval_mode
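    # Typical usage in a config (sketch; '11points' matches the VOC2007
    # protocol, 'area' matches VOC2012):
    #   val_evaluator = dict(type='VOCMetric', metric='mAP',
    #                        eval_mode='11points')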
# TODO: data_batch is no longer needed, consider adjusting the
# parameter position
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
gt = copy.deepcopy(data_sample)
# TODO: Need to refactor to support LoadAnnotations
gt_instances = gt['gt_instances']
gt_ignore_instances = gt['ignored_instances']
ann = dict(
labels=gt_instances['labels'].cpu().numpy(),
bboxes=gt_instances['bboxes'].cpu().numpy(),
bboxes_ignore=gt_ignore_instances['bboxes'].cpu().numpy(),
labels_ignore=gt_ignore_instances['labels'].cpu().numpy())
pred = data_sample['pred_instances']
pred_bboxes = pred['bboxes'].cpu().numpy()
pred_scores = pred['scores'].cpu().numpy()
pred_labels = pred['labels'].cpu().numpy()
dets = []
for label in range(len(self.dataset_meta['classes'])):
index = np.where(pred_labels == label)[0]
pred_bbox_scores = np.hstack(
[pred_bboxes[index], pred_scores[index].reshape((-1, 1))])
dets.append(pred_bbox_scores)
self.results.append((ann, dets))
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
logger: MMLogger = MMLogger.get_current_instance()
gts, preds = zip(*results)
eval_results = OrderedDict()
if self.metric == 'mAP':
assert isinstance(self.iou_thrs, list)
dataset_type = self.dataset_meta.get('dataset_type')
if dataset_type in ['VOC2007', 'VOC2012']:
dataset_name = 'voc'
if dataset_type == 'VOC2007' and self.eval_mode != '11points':
warnings.warn('Pascal VOC2007 uses `11points` as default '
'evaluate mode, but you are using '
f'{self.eval_mode}.')
elif dataset_type == 'VOC2012' and self.eval_mode != 'area':
warnings.warn('Pascal VOC2012 uses `area` as default '
'evaluate mode, but you are using '
f'{self.eval_mode}.')
else:
dataset_name = self.dataset_meta['classes']
mean_aps = []
for iou_thr in self.iou_thrs:
logger.info(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
# Follow the official implementation,
# http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar
# we should use the legacy coordinate system in mmdet 1.x,
# which means w, h should be computed as 'x2 - x1 + 1` and
# `y2 - y1 + 1`
mean_ap, _ = eval_map(
preds,
gts,
scale_ranges=self.scale_ranges,
iou_thr=iou_thr,
dataset=dataset_name,
logger=logger,
eval_mode=self.eval_mode,
use_legacy_coordinate=True)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
eval_results.move_to_end('mAP', last=False)
elif self.metric == 'recall':
            # TODO: Currently not checked.
            gt_bboxes = [gt['bboxes'] for gt in gts]
            # stack the per-class detections of each image into one
            # (k, 5) proposal array
            pr_bboxes = [np.vstack(pred) for pred in preds]
            recalls = eval_recalls(
                gt_bboxes,
                pr_bboxes,
                self.proposal_nums,
self.iou_thrs,
logger=logger,
use_legacy_coordinate=True)
for i, num in enumerate(self.proposal_nums):
for j, iou_thr in enumerate(self.iou_thrs):
eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(self.proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
| 8,080 | 44.655367 | 90 |
py
|
ERD
|
ERD-main/mmdet/evaluation/functional/class_names.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import is_str
def wider_face_classes() -> list:
"""Class names of WIDERFace."""
return ['face']
def voc_classes() -> list:
"""Class names of PASCAL VOC."""
return [
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]
def imagenet_det_classes() -> list:
"""Class names of ImageNet Det."""
return [
'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
'whale', 'wine_bottle', 'zebra'
]
def imagenet_vid_classes() -> list:
"""Class names of ImageNet VID."""
return [
'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',
'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',
'watercraft', 'whale', 'zebra'
]
def coco_classes() -> list:
"""Class names of COCO."""
return [
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',
'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'
]
def coco_panoptic_classes() -> list:
"""Class names of COCO panoptic."""
return [
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',
'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff',
'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light',
'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
'wall-wood', 'water-other', 'window-blind', 'window-other',
'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
'cabinet-merged', 'table-merged', 'floor-other-merged',
'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
'paper-merged', 'food-other-merged', 'building-other-merged',
'rock-merged', 'wall-other-merged', 'rug-merged'
]
def cityscapes_classes() -> list:
"""Class names of Cityscapes."""
return [
'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle'
]
def oid_challenge_classes() -> list:
"""Class names of Open Images Challenge."""
return [
'Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man', 'Land vehicle',
'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl',
'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert',
'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee',
'Flower', 'Window', 'Hat', 'Human head', 'Dog', 'Human arm', 'Drink',
'Human mouth', 'Human hair', 'Human nose', 'Human hand', 'Table',
'Marine invertebrates', 'Fish', 'Sculpture', 'Rose', 'Street light',
'Glasses', 'Fountain', 'Skyscraper', 'Swimwear', 'Brassiere', 'Drum',
'Duck', 'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat',
'Balloon', 'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt',
'Toy', 'Teddy bear', 'Pasta', 'Tomato', 'Human ear',
'Vehicle registration plate', 'Microphone', 'Musical keyboard',
'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable',
'Musical instrument', 'Suit', 'Motorcycle', 'Bagel', 'French fries',
'Hamburger', 'Chair', 'Salt and pepper shakers', 'Snail', 'Airplane',
'Horse', 'Laptop', 'Computer keyboard', 'Football helmet', 'Cocktail',
'Juice', 'Tie', 'Computer monitor', 'Human beard', 'Bottle',
'Saxophone', 'Lemon', 'Mouse', 'Sock', 'Cowboy hat', 'Sun hat',
'Football', 'Porch', 'Sunglasses', 'Lobster', 'Crab', 'Picture frame',
'Van', 'Crocodile', 'Surfboard', 'Shorts', 'Helicopter', 'Helmet',
'Sports uniform', 'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag',
'Flag', 'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree',
'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine',
'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box', 'Ambulance',
'Desk', 'Wine glass', 'Reptile', 'Tank', 'Traffic light', 'Billboard',
'Tent', 'Insect', 'Spider', 'Treadmill', 'Cupboard', 'Shelf',
'Seat belt', 'Human foot', 'Bicycle', 'Bicycle wheel', 'Couch',
'Bookcase', 'Fedora', 'Backpack', 'Bench', 'Oyster',
'Moths and butterflies', 'Lavender', 'Waffle', 'Fork', 'Animal',
'Accordion', 'Mobile phone', 'Plate', 'Coffee cup', 'Saucer',
'Platter', 'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 'Deer',
'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer', 'Necklace',
'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed', 'Cabinetry',
'Nightstand', 'Curtain', 'Chest of drawers', 'Drawer', 'Parrot',
'Sandal', 'High heels', 'Tableware', 'Cart', 'Mushroom', 'Kite',
'Missile', 'Seafood', 'Camera', 'Paper towel', 'Toilet paper',
'Sombrero', 'Radish', 'Lighthouse', 'Segway', 'Pig', 'Watercraft',
'Golf cart', 'studio couch', 'Dolphin', 'Whale', 'Earrings', 'Otter',
'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra',
'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers', 'Scoreboard',
'Lily', 'Carnivore', 'Power plugs and sockets', 'Office building',
'Sandwich', 'Swimming pool', 'Headphones', 'Tin can', 'Crown', 'Doll',
'Cake', 'Frog', 'Beetle', 'Ant', 'Gas stove', 'Canoe', 'Falcon',
'Blue jay', 'Egg', 'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock',
'Coffee', 'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance',
'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal', 'Wheelchair',
'Umbrella', 'Alpaca', 'Violin', 'Cello', 'Brown bear', 'Canary', 'Bat',
'Ruler', 'Plastic bag', 'Penguin', 'Watermelon', 'Harbor seal', 'Pen',
'Pumpkin', 'Harp', 'Kitchen appliance', 'Roller skates', 'Bust',
'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot',
'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken',
'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish', 'Tripod',
'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet', 'Toilet',
'Filing cabinet', 'Pretzel', 'Table tennis racket', 'Bronze sculpture',
'Rocket', 'Mouse', 'Hamster', 'Lizard', 'Lifejacket', 'Goat',
'Washing machine', 'Trumpet', 'Horn', 'Trombone', 'Sheep',
'Tablet computer', 'Pillow', 'Kitchen & dining room table',
'Parachute', 'Raven', 'Glove', 'Loveseat', 'Christmas tree',
'Shellfish', 'Rifle', 'Shotgun', 'Sushi', 'Sparrow', 'Bread',
'Toaster', 'Watch', 'Asparagus', 'Artichoke', 'Suitcase', 'Antelope',
'Broccoli', 'Ice cream', 'Racket', 'Banana', 'Cookie', 'Cucumber',
'Dragonfly', 'Lynx', 'Caterpillar', 'Light bulb', 'Office supplies',
'Miniskirt', 'Skirt', 'Fireplace', 'Potato', 'Light switch',
'Croissant', 'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags',
'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock',
'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox', 'Pizza',
'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern', 'Convenience store',
'Sword', 'Rugby ball', 'Owl', 'Ostrich', 'Pancake', 'Strawberry',
'Carrot', 'Tart', 'Dice', 'Turkey', 'Rabbit', 'Invertebrate', 'Vase',
'Stool', 'Swim cap', 'Shower', 'Clock', 'Jellyfish', 'Aircraft',
'Chopsticks', 'Orange', 'Snake', 'Sewing machine', 'Kangaroo', 'Mixer',
'Food processor', 'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon',
'Limousine', 'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger',
'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball',
'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin',
'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle',
'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot',
'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle',
'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit', 'Snowman',
'Cheetah', 'Alarm clock', 'Grape', 'Wrench', 'Wok', 'Bell pepper',
'Cake stand', 'Barrel', 'Woodpecker', 'Flute', 'Corded phone',
'Willow', 'Punching bag', 'Pomegranate', 'Telephone', 'Pear',
'Common fig', 'Bench', 'Wood-burning stove', 'Burrito', 'Nail',
'Turtle', 'Submarine sandwich', 'Drinking straw', 'Peach', 'Popcorn',
'Frying pan', 'Picnic basket', 'Honeycomb', 'Envelope', 'Mango',
'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell',
'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase',
'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup',
'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula',
'Pressure cooker', 'Ring binder', 'Beaker', 'Torch', 'Winter melon'
]
def oid_v6_classes() -> list:
"""Class names of Open Images V6."""
return [
'Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football',
'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy',
'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye',
'Cosmetics', 'Paddle', 'Snowman', 'Beer', 'Chopsticks', 'Human beard',
'Bird', 'Parking meter', 'Traffic light', 'Croissant', 'Cucumber',
'Radish', 'Towel', 'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick',
'Belt', 'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle',
'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot',
'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy',
'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt',
'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake', 'Brown bear',
'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel', 'Tower', 'Teapot',
'Person', 'Bow and arrow', 'Swimwear', 'Beehive', 'Brassiere', 'Bee',
'Bat (Animal)', 'Starfish', 'Popcorn', 'Burrito', 'Chainsaw',
'Balloon', 'Wrench', 'Tent', 'Vehicle registration plate', 'Lantern',
'Toaster', 'Flashlight', 'Billboard', 'Tiara', 'Limousine', 'Necklace',
'Carnivore', 'Scissors', 'Stairs', 'Computer keyboard', 'Printer',
'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock',
'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft',
'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack', 'Snowmobile',
'Clock', 'Medical equipment', 'Cattle', 'Cello', 'Jet ski', 'Camel',
'Coat', 'Suit', 'Desk', 'Cat', 'Bronze sculpture', 'Juice', 'Gondola',
'Beetle', 'Cannon', 'Computer mouse', 'Cookie', 'Office building',
'Fountain', 'Coin', 'Calculator', 'Cocktail', 'Computer monitor',
'Box', 'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment',
'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink', 'Zucchini',
'Ladle', 'Human mouth', 'Dairy Product', 'Dice', 'Oven', 'Dinosaur',
'Ratchet (Device)', 'Couch', 'Cricket ball', 'Winter melon', 'Spatula',
'Whiteboard', 'Pencil sharpener', 'Door', 'Hat', 'Shower', 'Eraser',
'Fedora', 'Guacamole', 'Dagger', 'Scarf', 'Dolphin', 'Sombrero',
'Tin can', 'Mug', 'Tap', 'Harbor seal', 'Stretcher', 'Can opener',
'Goggles', 'Human body', 'Roller skates', 'Coffee cup',
'Cutting board', 'Blender', 'Plumbing fixture', 'Stop sign',
'Office supplies', 'Volleyball (Ball)', 'Vase', 'Slow cooker',
'Wardrobe', 'Coffee', 'Whisk', 'Paper towel', 'Personal care', 'Food',
'Sun hat', 'Tree house', 'Flying disc', 'Skirt', 'Gas stove',
'Salt and pepper shakers', 'Mechanical fan', 'Face powder', 'Fax',
'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite', 'Tart',
'Treadmill', 'Fox', 'Flag', 'French horn', 'Window blind',
'Human foot', 'Golf cart', 'Jacket', 'Egg (Food)', 'Street light',
'Guitar', 'Pillow', 'Human leg', 'Isopod', 'Grape', 'Human ear',
'Power plugs and sockets', 'Panda', 'Giraffe', 'Woman', 'Door handle',
'Rhinoceros', 'Bathtub', 'Goldfish', 'Houseplant', 'Goat',
'Baseball bat', 'Baseball glove', 'Mixing bowl',
'Marine invertebrates', 'Kitchen utensil', 'Light switch', 'House',
'Horse', 'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed',
'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer',
'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster',
'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale', 'Drinking straw',
'Insect', 'Hair dryer', 'Kitchenware', 'Indoor rower', 'Invertebrate',
'Food processor', 'Bookcase', 'Refrigerator', 'Wood-burning stove',
'Punching bag', 'Common fig', 'Cocktail shaker', 'Jaguar (Animal)',
'Golf ball', 'Fashion accessory', 'Alarm clock', 'Filing cabinet',
'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife',
'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)', 'Lighthouse',
'Dumbbell', 'Human head', 'Bowl', 'Humidifier', 'Porch', 'Lizard',
'Billiard table', 'Mammal', 'Mouse', 'Motorcycle',
'Musical instrument', 'Swim cap', 'Frying pan', 'Snowplow',
'Bathroom cabinet', 'Missile', 'Bust', 'Man', 'Waffle iron', 'Milk',
'Ring binder', 'Plate', 'Mobile phone', 'Baked goods', 'Mushroom',
'Crutch', 'Pitcher (Container)', 'Mirror', 'Personal flotation device',
'Table tennis racket', 'Pencil case', 'Musical keyboard', 'Scoreboard',
'Briefcase', 'Kitchen knife', 'Nail (Construction)', 'Tennis ball',
'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano', 'Girl',
'Plant', 'Potato', 'Hair spray', 'Sports equipment', 'Pasta',
'Penguin', 'Pumpkin', 'Pear', 'Infant bed', 'Polar bear', 'Mixer',
'Cupboard', 'Jacuzzi', 'Pizza', 'Digital clock', 'Pig', 'Reptile',
'Rifle', 'Lipstick', 'Skateboard', 'Raven', 'High heels', 'Red panda',
'Rose', 'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood',
'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame', 'Sushi',
'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope', 'Submarine',
'Scorpion', 'Segway', 'Training bench', 'Snake', 'Coffee table',
'Skyscraper', 'Sheep', 'Television', 'Trombone', 'Tea', 'Tank', 'Taco',
'Telephone', 'Torch', 'Tiger', 'Strawberry', 'Trumpet', 'Tree',
'Tomato', 'Train', 'Tool', 'Picnic basket', 'Cooking spray',
'Trousers', 'Bowling equipment', 'Football helmet', 'Truck',
'Measuring cup', 'Coffeemaker', 'Violin', 'Vehicle', 'Handbag',
'Paper cutter', 'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale',
'Zebra', 'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion',
'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl', 'Duck',
'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet', 'Toilet paper',
'Squid', 'Clothing', 'Footwear', 'Lemon', 'Spider', 'Deer', 'Frog',
'Banana', 'Rocket', 'Wine glass', 'Countertop', 'Tablet computer',
'Waste container', 'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark',
'Candle', 'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser',
'Porcupine', 'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger',
'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus',
'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull',
'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb', 'Bench',
'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute', 'Orange',
'Antelope', 'Beaker', 'Moths and butterflies', 'Window', 'Closet',
'Castle', 'Jellyfish', 'Goose', 'Mule', 'Swan', 'Peach', 'Coconut',
'Seat belt', 'Raccoon', 'Chisel', 'Fork', 'Lamp', 'Camera',
'Squash (Plant)', 'Racket', 'Human face', 'Human arm', 'Vegetable',
'Diaper', 'Unicycle', 'Falcon', 'Chime', 'Snail', 'Shellfish',
'Cabbage', 'Carrot', 'Mango', 'Jeans', 'Flowerpot', 'Pineapple',
'Drawer', 'Stool', 'Envelope', 'Cake', 'Dragonfly', 'Common sunflower',
'Microwave oven', 'Honeycomb', 'Marine mammal', 'Sea lion', 'Ladybug',
'Shelf', 'Watch', 'Candy', 'Salad', 'Parrot', 'Handgun', 'Sparrow',
'Van', 'Grinder', 'Spice rack', 'Light bulb', 'Corded phone',
'Sports uniform', 'Tennis racket', 'Wall clock', 'Serving tray',
'Kitchen & dining room table', 'Dog bed', 'Cake stand',
'Cat furniture', 'Bathroom accessory', 'Facial tissue holder',
'Pressure cooker', 'Kitchen appliance', 'Tire', 'Ruler',
'Luggage and bags', 'Microphone', 'Broccoli', 'Umbrella', 'Pastry',
'Grapefruit', 'Band-aid', 'Animal', 'Bell pepper', 'Turkey', 'Lily',
'Pomegranate', 'Doughnut', 'Glasses', 'Human nose', 'Pen', 'Ant',
'Car', 'Aircraft', 'Human hand', 'Skunk', 'Teddy bear', 'Watermelon',
'Cantaloupe', 'Dishwasher', 'Flute', 'Balance beam', 'Sandwich',
'Shrimp', 'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod',
'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume',
'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair',
'Rugby ball', 'Armadillo', 'Maracas', 'Helmet'
]
def objects365v1_classes() -> list:
"""Class names of Objects365 V1."""
return [
'person', 'sneakers', 'chair', 'hat', 'lamp', 'bottle',
'cabinet/shelf', 'cup', 'car', 'glasses', 'picture/frame', 'desk',
'handbag', 'street lights', 'book', 'plate', 'helmet', 'leather shoes',
'pillow', 'glove', 'potted plant', 'bracelet', 'flower', 'tv',
'storage box', 'vase', 'bench', 'wine glass', 'boots', 'bowl',
'dining table', 'umbrella', 'boat', 'flag', 'speaker', 'trash bin/can',
'stool', 'backpack', 'couch', 'belt', 'carpet', 'basket',
'towel/napkin', 'slippers', 'barrel/bucket', 'coffee table', 'suv',
'toy', 'tie', 'bed', 'traffic light', 'pen/pencil', 'microphone',
'sandals', 'canned', 'necklace', 'mirror', 'faucet', 'bicycle',
'bread', 'high heels', 'ring', 'van', 'watch', 'sink', 'horse', 'fish',
'apple', 'camera', 'candle', 'teddy bear', 'cake', 'motorcycle',
'wild bird', 'laptop', 'knife', 'traffic sign', 'cell phone', 'paddle',
'truck', 'cow', 'power outlet', 'clock', 'drum', 'fork', 'bus',
'hanger', 'nightstand', 'pot/pan', 'sheep', 'guitar', 'traffic cone',
'tea pot', 'keyboard', 'tripod', 'hockey', 'fan', 'dog', 'spoon',
'blackboard/whiteboard', 'balloon', 'air conditioner', 'cymbal',
'mouse', 'telephone', 'pickup truck', 'orange', 'banana', 'airplane',
'luggage', 'skis', 'soccer', 'trolley', 'oven', 'remote',
'baseball glove', 'paper towel', 'refrigerator', 'train', 'tomato',
'machinery vehicle', 'tent', 'shampoo/shower gel', 'head phone',
'lantern', 'donut', 'cleaning products', 'sailboat', 'tangerine',
'pizza', 'kite', 'computer box', 'elephant', 'toiletries', 'gas stove',
'broccoli', 'toilet', 'stroller', 'shovel', 'baseball bat',
'microwave', 'skateboard', 'surfboard', 'surveillance camera', 'gun',
'life saver', 'cat', 'lemon', 'liquid soap', 'zebra', 'duck',
'sports car', 'giraffe', 'pumpkin', 'piano', 'stop sign', 'radiator',
'converter', 'tissue ', 'carrot', 'washing machine', 'vent', 'cookies',
'cutting/chopping board', 'tennis racket', 'candy',
'skating and skiing shoes', 'scissors', 'folder', 'baseball',
'strawberry', 'bow tie', 'pigeon', 'pepper', 'coffee machine',
'bathtub', 'snowboard', 'suitcase', 'grapes', 'ladder', 'pear',
'american football', 'basketball', 'potato', 'paint brush', 'printer',
'billiards', 'fire hydrant', 'goose', 'projector', 'sausage',
'fire extinguisher', 'extension cord', 'facial mask', 'tennis ball',
'chopsticks', 'electronic stove and gas stove', 'pie', 'frisbee',
'kettle', 'hamburger', 'golf club', 'cucumber', 'clutch', 'blender',
'tong', 'slide', 'hot dog', 'toothbrush', 'facial cleanser', 'mango',
'deer', 'egg', 'violin', 'marker', 'ship', 'chicken', 'onion',
'ice cream', 'tape', 'wheelchair', 'plum', 'bar soap', 'scale',
'watermelon', 'cabbage', 'router/modem', 'golf ball', 'pine apple',
'crane', 'fire truck', 'peach', 'cello', 'notepaper', 'tricycle',
'toaster', 'helicopter', 'green beans', 'brush', 'carriage', 'cigar',
'earphone', 'penguin', 'hurdle', 'swing', 'radio', 'CD',
'parking meter', 'swan', 'garlic', 'french fries', 'horn', 'avocado',
'saxophone', 'trumpet', 'sandwich', 'cue', 'kiwi fruit', 'bear',
'fishing rod', 'cherry', 'tablet', 'green vegetables', 'nuts', 'corn',
'key', 'screwdriver', 'globe', 'broom', 'pliers', 'volleyball',
'hammer', 'eggplant', 'trophy', 'dates', 'board eraser', 'rice',
'tape measure/ruler', 'dumbbell', 'hamimelon', 'stapler', 'camel',
'lettuce', 'goldfish', 'meat balls', 'medal', 'toothpaste', 'antelope',
'shrimp', 'rickshaw', 'trombone', 'pomegranate', 'coconut',
'jellyfish', 'mushroom', 'calculator', 'treadmill', 'butterfly',
'egg tart', 'cheese', 'pig', 'pomelo', 'race car', 'rice cooker',
'tuba', 'crosswalk sign', 'papaya', 'hair drier', 'green onion',
'chips', 'dolphin', 'sushi', 'urinal', 'donkey', 'electric drill',
'spring rolls', 'tortoise/turtle', 'parrot', 'flute', 'measuring cup',
'shark', 'steak', 'poker card', 'binoculars', 'llama', 'radish',
'noodles', 'yak', 'mop', 'crab', 'microscope', 'barbell', 'bread/bun',
'baozi', 'lion', 'red cabbage', 'polar bear', 'lighter', 'seal',
'mangosteen', 'comb', 'eraser', 'pitaya', 'scallop', 'pencil case',
'saw', 'table tennis paddle', 'okra', 'starfish', 'eagle', 'monkey',
'durian', 'game board', 'rabbit', 'french horn', 'ambulance',
'asparagus', 'hoverboard', 'pasta', 'target', 'hotair balloon',
'chainsaw', 'lobster', 'iron', 'flashlight'
]
def objects365v2_classes() -> list:
"""Class names of Objects365 V2."""
return [
'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp',
'Glasses', 'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf',
'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet',
'Book', 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower',
'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', 'Pillow', 'Boots',
'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt',
'Moniter/TV', 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker',
'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', 'Stool',
'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Bakset', 'Drum',
'Pen/Pencil', 'Bus', 'Wild Bird', 'High Heels', 'Motorcycle', 'Guitar',
'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck',
'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy',
'Candle', 'Sailboat', 'Laptop', 'Awning', 'Bed', 'Faucet', 'Tent',
'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner',
'Knife', 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork',
'Traffic Sign', 'Ballon', 'Tripod', 'Dog', 'Spoon', 'Clock', 'Pot',
'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger',
'Blackboard/Whiteboard', 'Napkin', 'Other Fish', 'Orange/Tangerine',
'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle',
'Fan', 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane',
'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', 'Luggage',
'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone',
'Sports Car', 'Stop Sign', 'Dessert', 'Scooter', 'Stroller', 'Crane',
'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat',
'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza',
'Elephant', 'Skateboard', 'Surfboard', 'Gun',
'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot',
'Toilet', 'Kite', 'Strawberry', 'Other Balls', 'Shovel', 'Pepper',
'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks',
'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board',
'Coffee Table', 'Side Table', 'Scissors', 'Marker', 'Pie', 'Ladder',
'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball',
'Zebra', 'Grape', 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin',
'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', 'Billards',
'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase',
'Cucumber', 'Cigar/Cigarette ', 'Paint Brush', 'Pear', 'Heavy Truck',
'Hamburger', 'Extractor', 'Extention Cord', 'Tong', 'Tennis Racket',
'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis',
'Ship', 'Swing', 'Coffee Machine', 'Slide', 'Carriage', 'Onion',
'Green beans', 'Projector', 'Frisbee',
'Washing Machine/Drying Machine', 'Chicken', 'Printer', 'Watermelon',
'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hotair ballon',
'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog',
'Blender', 'Peach', 'Rice', 'Wallet/Purse', 'Volleyball', 'Deer',
'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple',
'Golf Ball', 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle',
'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', 'Megaphone',
'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion',
'Sandwich', 'Nuts', 'Speed Limit Sign', 'Induction Cooker', 'Broom',
'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',
'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese',
'Notepaper', 'Cherry', 'Pliers', 'CD', 'Pasta', 'Hammer', 'Cue',
'Avocado', 'Hamimelon', 'Flask', 'Mushroon', 'Screwdriver', 'Soap',
'Recorder', 'Bear', 'Eggplant', 'Board Eraser', 'Coconut',
'Tape Measur/ Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', 'Steak',
'Crosswalk Sign', 'Stapler', 'Campel', 'Formula 1 ', 'Pomegranate',
'Dishwasher', 'Crab', 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba',
'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', 'Buttefly',
'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill',
'Hair Dryer', 'Egg tart', 'Jellyfish', 'Treadmill', 'Lighter',
'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target',
'French', 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak',
'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', 'Scallop',
'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Teniis paddle',
'Cosmetics Brush/Eyeliner Pencil', 'Chainsaw', 'Eraser', 'Lobster',
'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling',
'Table Tennis '
]
dataset_aliases = {
'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
'coco': ['coco', 'mscoco', 'ms_coco'],
'coco_panoptic': ['coco_panoptic', 'panoptic'],
'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],
'cityscapes': ['cityscapes'],
'oid_challenge': ['oid_challenge', 'openimages_challenge'],
'oid_v6': ['oid_v6', 'openimages_v6'],
'objects365v1': ['objects365v1', 'obj365v1'],
'objects365v2': ['objects365v2', 'obj365v2']
}
def get_classes(dataset) -> list:
"""Get class names of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_classes()')
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
        raise TypeError(f'dataset must be a str, but got {type(dataset)}')
return labels
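# Illustrative usage sketch (not part of the original module); the aliases
# below resolve through ``dataset_aliases`` defined above:
#   >>> get_classes('coco')   # returns the COCO class names
#   >>> get_classes('voc07')  # resolves to voc_classes() via the 'voc' alias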
| 32,936 | 62.584942 | 79 |
py
|
ERD
|
ERD-main/mmdet/evaluation/functional/recall.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections.abc import Sequence
import numpy as np
from mmengine.logging import print_log
from terminaltables import AsciiTable
from .bbox_overlaps import bbox_overlaps
def _recalls(all_ious, proposal_nums, thrs):
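    # Note (added for clarity): for each proposal budget, ground-truth boxes
    # are greedily matched to their best-overlapping proposal among the top
    # ``proposal_num`` proposals, removing both from further matching; recall
    # at a threshold is then the fraction of ground truths whose matched IoU
    # reaches that threshold.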
img_num = all_ious.shape[0]
total_gt_num = sum([ious.shape[0] for ious in all_ious])
_ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
for k, proposal_num in enumerate(proposal_nums):
tmp_ious = np.zeros(0)
for i in range(img_num):
ious = all_ious[i][:, :proposal_num].copy()
gt_ious = np.zeros((ious.shape[0]))
if ious.size == 0:
tmp_ious = np.hstack((tmp_ious, gt_ious))
continue
for j in range(ious.shape[0]):
gt_max_overlaps = ious.argmax(axis=1)
max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
gt_idx = max_ious.argmax()
gt_ious[j] = max_ious[gt_idx]
box_idx = gt_max_overlaps[gt_idx]
ious[gt_idx, :] = -1
ious[:, box_idx] = -1
tmp_ious = np.hstack((tmp_ious, gt_ious))
_ious[k, :] = tmp_ious
_ious = np.fliplr(np.sort(_ious, axis=1))
recalls = np.zeros((proposal_nums.size, thrs.size))
for i, thr in enumerate(thrs):
recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)
return recalls
def set_recall_param(proposal_nums, iou_thrs):
"""Check proposal_nums and iou_thrs and set correct format."""
if isinstance(proposal_nums, Sequence):
_proposal_nums = np.array(proposal_nums)
elif isinstance(proposal_nums, int):
_proposal_nums = np.array([proposal_nums])
else:
_proposal_nums = proposal_nums
if iou_thrs is None:
_iou_thrs = np.array([0.5])
elif isinstance(iou_thrs, Sequence):
_iou_thrs = np.array(iou_thrs)
elif isinstance(iou_thrs, float):
_iou_thrs = np.array([iou_thrs])
else:
_iou_thrs = iou_thrs
return _proposal_nums, _iou_thrs
def eval_recalls(gts,
proposals,
proposal_nums=None,
iou_thrs=0.5,
logger=None,
use_legacy_coordinate=False):
"""Calculate recalls.
Args:
gts (list[ndarray]): a list of arrays of shape (n, 4)
proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5)
proposal_nums (int | Sequence[int]): Top N proposals to be evaluated.
iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5.
logger (logging.Logger | str | None): The way to print the recall
summary. See `mmengine.logging.print_log()` for details.
Default: None.
        use_legacy_coordinate (bool): Whether to use the coordinate system of
            mmdet v1.x, in which "1" was added to both height and width, so
            w and h are computed as `x2 - x1 + 1` and `y2 - y1 + 1`.
            Default: False.
Returns:
ndarray: recalls of different ious and proposal nums
"""
img_num = len(gts)
assert img_num == len(proposals)
proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)
all_ious = []
for i in range(img_num):
if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
scores = proposals[i][:, 4]
sort_idx = np.argsort(scores)[::-1]
img_proposal = proposals[i][sort_idx, :]
else:
img_proposal = proposals[i]
prop_num = min(img_proposal.shape[0], proposal_nums[-1])
if gts[i] is None or gts[i].shape[0] == 0:
ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
else:
ious = bbox_overlaps(
gts[i],
img_proposal[:prop_num, :4],
use_legacy_coordinate=use_legacy_coordinate)
all_ious.append(ious)
all_ious = np.array(all_ious)
recalls = _recalls(all_ious, proposal_nums, iou_thrs)
print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger)
return recalls
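# Illustrative usage sketch (not part of the original module); the boxes below
# are made-up values:
#   gts = [np.array([[10, 10, 50, 50]], dtype=np.float32)]
#   proposals = [np.array([[12, 12, 48, 48, 0.9]], dtype=np.float32)]
#   recalls = eval_recalls(gts, proposals, proposal_nums=[1], iou_thrs=[0.5])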
def print_recall_summary(recalls,
proposal_nums,
iou_thrs,
row_idxs=None,
col_idxs=None,
logger=None):
"""Print recalls in a table.
Args:
        recalls (ndarray): calculated from ``eval_recalls``
proposal_nums (ndarray or list): top N proposals
iou_thrs (ndarray or list): iou thresholds
row_idxs (ndarray): which rows(proposal nums) to print
col_idxs (ndarray): which cols(iou thresholds) to print
logger (logging.Logger | str | None): The way to print the recall
summary. See `mmengine.logging.print_log()` for details.
Default: None.
"""
proposal_nums = np.array(proposal_nums, dtype=np.int32)
iou_thrs = np.array(iou_thrs)
if row_idxs is None:
row_idxs = np.arange(proposal_nums.size)
if col_idxs is None:
col_idxs = np.arange(iou_thrs.size)
row_header = [''] + iou_thrs[col_idxs].tolist()
table_data = [row_header]
for i, num in enumerate(proposal_nums[row_idxs]):
row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()]
row.insert(0, num)
table_data.append(row)
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
def plot_num_recall(recalls, proposal_nums):
"""Plot Proposal_num-Recalls curve.
Args:
        recalls (ndarray or list): shape (k,)
        proposal_nums (ndarray or list): same shape as `recalls`
"""
if isinstance(proposal_nums, np.ndarray):
_proposal_nums = proposal_nums.tolist()
else:
_proposal_nums = proposal_nums
if isinstance(recalls, np.ndarray):
_recalls = recalls.tolist()
else:
_recalls = recalls
import matplotlib.pyplot as plt
f = plt.figure()
plt.plot([0] + _proposal_nums, [0] + _recalls)
plt.xlabel('Proposal num')
plt.ylabel('Recall')
plt.axis([0, proposal_nums.max(), 0, 1])
f.show()
def plot_iou_recall(recalls, iou_thrs):
"""Plot IoU-Recalls curve.
Args:
        recalls (ndarray or list): shape (k,)
        iou_thrs (ndarray or list): same shape as `recalls`
"""
if isinstance(iou_thrs, np.ndarray):
_iou_thrs = iou_thrs.tolist()
else:
_iou_thrs = iou_thrs
if isinstance(recalls, np.ndarray):
_recalls = recalls.tolist()
else:
_recalls = recalls
import matplotlib.pyplot as plt
f = plt.figure()
plt.plot(_iou_thrs + [1.0], _recalls + [0.])
plt.xlabel('IoU')
plt.ylabel('Recall')
plt.axis([iou_thrs.min(), 1, 0, 1])
f.show()
| 6,848 | 33.245 | 79 |
py
|
ERD
|
ERD-main/mmdet/evaluation/functional/cityscapes_utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) https://github.com/mcordts/cityscapesScripts
# A wrapper of `cityscapesscripts` which supports loading groundtruth
# image from `backend_args`.
import json
import os
import sys
from pathlib import Path
from typing import Optional, Union
import mmcv
import numpy as np
from mmengine.fileio import get
try:
import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa: E501
from cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling import \
CArgs # noqa: E501
from cityscapesscripts.evaluation.instance import Instance
from cityscapesscripts.helpers.csHelpers import (id2label, labels,
writeDict2JSON)
HAS_CITYSCAPESAPI = True
except ImportError:
CArgs = object
HAS_CITYSCAPESAPI = False
def evaluateImgLists(prediction_list: list,
groundtruth_list: list,
args: CArgs,
backend_args: Optional[dict] = None,
dump_matches: bool = False) -> dict:
"""A wrapper of obj:``cityscapesscripts.evaluation.
    evalInstanceLevelSemanticLabeling.evaluateImgLists``. Supports loading
    the groundtruth images from a file backend.
    Args:
        prediction_list (list): A list of prediction txt files.
        groundtruth_list (list): A list of groundtruth image files.
args (CArgs): A global object setting in
obj:``cityscapesscripts.evaluation.
evalInstanceLevelSemanticLabeling``
backend_args (dict, optional): Arguments to instantiate the
            file backend corresponding to the URI prefix. Defaults to None.
dump_matches (bool): whether dump matches.json. Defaults to False.
Returns:
dict: The computed metric.
"""
if not HAS_CITYSCAPESAPI:
raise RuntimeError('Failed to import `cityscapesscripts`.'
'Please try to install official '
'cityscapesscripts by '
'"pip install cityscapesscripts"')
# determine labels of interest
CSEval.setInstanceLabels(args)
# get dictionary of all ground truth instances
gt_instances = getGtInstances(
groundtruth_list, args, backend_args=backend_args)
# match predictions and ground truth
matches = matchGtWithPreds(prediction_list, groundtruth_list, gt_instances,
args, backend_args)
if dump_matches:
CSEval.writeDict2JSON(matches, 'matches.json')
# evaluate matches
apScores = CSEval.evaluateMatches(matches, args)
# averages
avgDict = CSEval.computeAverages(apScores, args)
# result dict
resDict = CSEval.prepareJSONDataForResults(avgDict, apScores, args)
if args.JSONOutput:
# create output folder if necessary
path = os.path.dirname(args.exportFile)
CSEval.ensurePath(path)
# Write APs to JSON
CSEval.writeDict2JSON(resDict, args.exportFile)
CSEval.printResults(avgDict, args)
return resDict
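# Illustrative usage sketch (file lists and paths are assumptions, not part of
# the original module); it assumes the module-level ``args`` object shipped
# with cityscapesscripts:
#   results = evaluateImgLists(
#       pred_txt_list, gt_png_list, CSEval.args,
#       backend_args=dict(backend='local'))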
def matchGtWithPreds(prediction_list: list,
groundtruth_list: list,
gt_instances: dict,
args: CArgs,
backend_args=None):
"""A wrapper of obj:``cityscapesscripts.evaluation.
    evalInstanceLevelSemanticLabeling.matchGtWithPreds``. Supports loading
    the groundtruth images from a file backend.
    Args:
        prediction_list (list): A list of prediction txt files.
        groundtruth_list (list): A list of groundtruth image files.
gt_instances (dict): Groundtruth dict.
args (CArgs): A global object setting in
obj:``cityscapesscripts.evaluation.
evalInstanceLevelSemanticLabeling``
backend_args (dict, optional): Arguments to instantiate the
            file backend corresponding to the URI prefix. Defaults to None.
Returns:
dict: The processed prediction and groundtruth result.
"""
if not HAS_CITYSCAPESAPI:
raise RuntimeError('Failed to import `cityscapesscripts`.'
'Please try to install official '
'cityscapesscripts by '
'"pip install cityscapesscripts"')
matches: dict = dict()
if not args.quiet:
print(f'Matching {len(prediction_list)} pairs of images...')
count = 0
for (pred, gt) in zip(prediction_list, groundtruth_list):
# Read input files
gt_image = readGTImage(gt, backend_args)
pred_info = readPredInfo(pred)
# Get and filter ground truth instances
unfiltered_instances = gt_instances[gt]
cur_gt_instances_orig = CSEval.filterGtInstances(
unfiltered_instances, args)
# Try to assign all predictions
(cur_gt_instances,
cur_pred_instances) = CSEval.assignGt2Preds(cur_gt_instances_orig,
gt_image, pred_info, args)
# append to global dict
matches[gt] = {}
matches[gt]['groundTruth'] = cur_gt_instances
matches[gt]['prediction'] = cur_pred_instances
count += 1
if not args.quiet:
print(f'\rImages Processed: {count}', end=' ')
sys.stdout.flush()
if not args.quiet:
print('')
return matches
def readGTImage(image_file: Union[str, Path],
backend_args: Optional[dict] = None) -> np.ndarray:
"""Read an image from path.
Same as obj:``cityscapesscripts.evaluation.
    evalInstanceLevelSemanticLabeling.readGTImage``, but supports loading the
    groundtruth image from a file backend.
Args:
image_file (str or Path): Either a str or pathlib.Path.
backend_args (dict, optional): Instantiates the corresponding file
backend. It may contain `backend` key to specify the file
backend. If it contains, the file backend corresponding to this
value will be used and initialized with the remaining values,
otherwise the corresponding file backend will be selected
based on the prefix of the file path. Defaults to None.
Returns:
np.ndarray: The groundtruth image.
"""
img_bytes = get(image_file, backend_args=backend_args)
img = mmcv.imfrombytes(img_bytes, flag='unchanged', backend='pillow')
return img
def readPredInfo(prediction_file: str) -> dict:
"""A wrapper of obj:``cityscapesscripts.evaluation.
evalInstanceLevelSemanticLabeling.readPredInfo``.
Args:
prediction_file (str): The prediction txt file.
Returns:
dict: The processed prediction results.
"""
if not HAS_CITYSCAPESAPI:
raise RuntimeError('Failed to import `cityscapesscripts`.'
'Please try to install official '
'cityscapesscripts by '
'"pip install cityscapesscripts"')
printError = CSEval.printError
predInfo = {}
if (not os.path.isfile(prediction_file)):
printError(f"Infofile '{prediction_file}' "
'for the predictions not found.')
with open(prediction_file) as f:
for line in f:
splittedLine = line.split(' ')
if len(splittedLine) != 3:
printError('Invalid prediction file. Expected content: '
'relPathPrediction1 labelIDPrediction1 '
'confidencePrediction1')
if os.path.isabs(splittedLine[0]):
printError('Invalid prediction file. First entry in each '
'line must be a relative path.')
filename = os.path.join(
os.path.dirname(prediction_file), splittedLine[0])
imageInfo = {}
imageInfo['labelID'] = int(float(splittedLine[1]))
imageInfo['conf'] = float(splittedLine[2]) # type: ignore
predInfo[filename] = imageInfo
return predInfo
def getGtInstances(groundtruth_list: list,
args: CArgs,
backend_args: Optional[dict] = None) -> dict:
"""A wrapper of obj:``cityscapesscripts.evaluation.
    evalInstanceLevelSemanticLabeling.getGtInstances``. Supports loading
    the groundtruth images from a file backend.
    Args:
        groundtruth_list (list): A list of groundtruth image files.
args (CArgs): A global object setting in
obj:``cityscapesscripts.evaluation.
evalInstanceLevelSemanticLabeling``
backend_args (dict, optional): Arguments to instantiate the
            file backend corresponding to the URI prefix. Defaults to None.
Returns:
dict: The computed metric.
"""
if not HAS_CITYSCAPESAPI:
raise RuntimeError('Failed to import `cityscapesscripts`.'
'Please try to install official '
'cityscapesscripts by '
'"pip install cityscapesscripts"')
# if there is a global statistics json, then load it
if (os.path.isfile(args.gtInstancesFile)):
if not args.quiet:
print('Loading ground truth instances from JSON.')
with open(args.gtInstancesFile) as json_file:
gt_instances = json.load(json_file)
# otherwise create it
else:
if (not args.quiet):
print('Creating ground truth instances from png files.')
gt_instances = instances2dict(
groundtruth_list, args, backend_args=backend_args)
writeDict2JSON(gt_instances, args.gtInstancesFile)
return gt_instances
def instances2dict(image_list: list,
args: CArgs,
backend_args: Optional[dict] = None) -> dict:
"""A wrapper of obj:``cityscapesscripts.evaluation.
    evalInstanceLevelSemanticLabeling.instances2dict``. Supports loading
    the groundtruth images from a file backend.
    Args:
        image_list (list): A list of image files.
args (CArgs): A global object setting in
obj:``cityscapesscripts.evaluation.
evalInstanceLevelSemanticLabeling``
backend_args (dict, optional): Arguments to instantiate the
            file backend corresponding to the URI prefix. Defaults to None.
Returns:
dict: The processed groundtruth results.
"""
if not HAS_CITYSCAPESAPI:
raise RuntimeError('Failed to import `cityscapesscripts`.'
'Please try to install official '
'cityscapesscripts by '
'"pip install cityscapesscripts"')
imgCount = 0
instanceDict = {}
if not isinstance(image_list, list):
image_list = [image_list]
if not args.quiet:
print(f'Processing {len(image_list)} images...')
for image_name in image_list:
# Load image
img_bytes = get(image_name, backend_args=backend_args)
imgNp = mmcv.imfrombytes(img_bytes, flag='unchanged', backend='pillow')
# Initialize label categories
instances: dict = {}
for label in labels:
instances[label.name] = []
# Loop through all instance ids in instance image
for instanceId in np.unique(imgNp):
instanceObj = Instance(imgNp, instanceId)
instances[id2label[instanceObj.labelID].name].append(
instanceObj.toDict())
instanceDict[image_name] = instances
imgCount += 1
if not args.quiet:
print(f'\rImages Processed: {imgCount}', end=' ')
sys.stdout.flush()
return instanceDict
| 11,654 | 37.465347 | 97 |
py
|
ERD
|
ERD-main/mmdet/evaluation/functional/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_overlaps import bbox_overlaps
from .cityscapes_utils import evaluateImgLists
from .class_names import (cityscapes_classes, coco_classes,
coco_panoptic_classes, dataset_aliases, get_classes,
imagenet_det_classes, imagenet_vid_classes,
objects365v1_classes, objects365v2_classes,
oid_challenge_classes, oid_v6_classes, voc_classes)
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import (INSTANCE_OFFSET, pq_compute_multi_core,
pq_compute_single_core)
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
'print_recall_summary', 'plot_num_recall', 'plot_iou_recall',
'oid_v6_classes', 'oid_challenge_classes', 'INSTANCE_OFFSET',
'pq_compute_single_core', 'pq_compute_multi_core', 'bbox_overlaps',
'objects365v1_classes', 'objects365v2_classes', 'coco_panoptic_classes',
'evaluateImgLists'
]
| 1,331 | 52.28 | 78 |
py
|
ERD
|
ERD-main/mmdet/evaluation/functional/panoptic_utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2018, Alexander Kirillov
# This file supports `backend_args` for `panopticapi`,
# the source code is copied from `panopticapi`,
# only the way to load the gt images is modified.
import multiprocessing
import os
import mmcv
import numpy as np
from mmengine.fileio import get
# A custom value to distinguish instance ID and category ID; need to
# be greater than the number of categories.
# For a pixel in the panoptic result map:
# pan_id = ins_id * INSTANCE_OFFSET + cat_id
INSTANCE_OFFSET = 1000
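# For example, the second instance (ins_id=2) of category 17 is encoded as
# pan_id = 2 * 1000 + 17 = 2017.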
try:
from panopticapi.evaluation import OFFSET, VOID, PQStat
from panopticapi.utils import rgb2id
except ImportError:
PQStat = None
rgb2id = None
VOID = 0
OFFSET = 256 * 256 * 256
def pq_compute_single_core(proc_id,
annotation_set,
gt_folder,
pred_folder,
categories,
backend_args=None,
print_log=False):
"""The single core function to evaluate the metric of Panoptic
Segmentation.
Same as the function with the same name in `panopticapi`. Only the function
to load the images is changed to use the file client.
Args:
proc_id (int): The id of the mini process.
gt_folder (str): The path of the ground truth images.
pred_folder (str): The path of the prediction images.
categories (str): The categories of the dataset.
        backend_args (dict, optional): Arguments to instantiate the file
            backend. If None, the local backend is used.
print_log (bool): Whether to print the log. Defaults to False.
"""
if PQStat is None:
raise RuntimeError(
'panopticapi is not installed, please install it by: '
'pip install git+https://github.com/cocodataset/'
'panopticapi.git.')
pq_stat = PQStat()
idx = 0
for gt_ann, pred_ann in annotation_set:
if print_log and idx % 100 == 0:
print('Core: {}, {} from {} images processed'.format(
proc_id, idx, len(annotation_set)))
idx += 1
# The gt images can be on the local disk or `ceph`, so we use
        # the file backend here.
img_bytes = get(
os.path.join(gt_folder, gt_ann['file_name']),
backend_args=backend_args)
pan_gt = mmcv.imfrombytes(img_bytes, flag='color', channel_order='rgb')
pan_gt = rgb2id(pan_gt)
        # The predictions can only be on the local disk now.
pan_pred = mmcv.imread(
os.path.join(pred_folder, pred_ann['file_name']),
flag='color',
channel_order='rgb')
pan_pred = rgb2id(pan_pred)
gt_segms = {el['id']: el for el in gt_ann['segments_info']}
pred_segms = {el['id']: el for el in pred_ann['segments_info']}
# predicted segments area calculation + prediction sanity checks
pred_labels_set = set(el['id'] for el in pred_ann['segments_info'])
labels, labels_cnt = np.unique(pan_pred, return_counts=True)
for label, label_cnt in zip(labels, labels_cnt):
if label not in pred_segms:
if label == VOID:
continue
raise KeyError(
'In the image with ID {} segment with ID {} is '
'presented in PNG and not presented in JSON.'.format(
gt_ann['image_id'], label))
pred_segms[label]['area'] = label_cnt
pred_labels_set.remove(label)
if pred_segms[label]['category_id'] not in categories:
raise KeyError(
'In the image with ID {} segment with ID {} has '
'unknown category_id {}.'.format(
gt_ann['image_id'], label,
pred_segms[label]['category_id']))
if len(pred_labels_set) != 0:
raise KeyError(
'In the image with ID {} the following segment IDs {} '
'are presented in JSON and not presented in PNG.'.format(
gt_ann['image_id'], list(pred_labels_set)))
# confusion matrix calculation
pan_gt_pred = pan_gt.astype(np.uint64) * OFFSET + pan_pred.astype(
np.uint64)
gt_pred_map = {}
labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True)
for label, intersection in zip(labels, labels_cnt):
gt_id = label // OFFSET
pred_id = label % OFFSET
gt_pred_map[(gt_id, pred_id)] = intersection
# count all matched pairs
gt_matched = set()
pred_matched = set()
for label_tuple, intersection in gt_pred_map.items():
gt_label, pred_label = label_tuple
if gt_label not in gt_segms:
continue
if pred_label not in pred_segms:
continue
if gt_segms[gt_label]['iscrowd'] == 1:
continue
if gt_segms[gt_label]['category_id'] != pred_segms[pred_label][
'category_id']:
continue
union = pred_segms[pred_label]['area'] + gt_segms[gt_label][
'area'] - intersection - gt_pred_map.get((VOID, pred_label), 0)
iou = intersection / union
if iou > 0.5:
pq_stat[gt_segms[gt_label]['category_id']].tp += 1
pq_stat[gt_segms[gt_label]['category_id']].iou += iou
gt_matched.add(gt_label)
pred_matched.add(pred_label)
        # count false negatives
crowd_labels_dict = {}
for gt_label, gt_info in gt_segms.items():
if gt_label in gt_matched:
continue
# crowd segments are ignored
if gt_info['iscrowd'] == 1:
crowd_labels_dict[gt_info['category_id']] = gt_label
continue
pq_stat[gt_info['category_id']].fn += 1
# count false positives
for pred_label, pred_info in pred_segms.items():
if pred_label in pred_matched:
continue
# intersection of the segment with VOID
intersection = gt_pred_map.get((VOID, pred_label), 0)
# plus intersection with corresponding CROWD region if it exists
if pred_info['category_id'] in crowd_labels_dict:
intersection += gt_pred_map.get(
(crowd_labels_dict[pred_info['category_id']], pred_label),
0)
# predicted segment is ignored if more than half of
# the segment correspond to VOID and CROWD regions
if intersection / pred_info['area'] > 0.5:
continue
pq_stat[pred_info['category_id']].fp += 1
if print_log:
print('Core: {}, all {} images processed'.format(
proc_id, len(annotation_set)))
return pq_stat
def pq_compute_multi_core(matched_annotations_list,
gt_folder,
pred_folder,
categories,
backend_args=None,
nproc=32):
"""Evaluate the metrics of Panoptic Segmentation with multithreading.
Same as the function with the same name in `panopticapi`.
Args:
matched_annotations_list (list): The matched annotation list. Each
element is a tuple of annotations of the same image with the
format (gt_anns, pred_anns).
gt_folder (str): The path of the ground truth images.
pred_folder (str): The path of the prediction images.
categories (str): The categories of the dataset.
        backend_args (dict, optional): Arguments to instantiate the file
            backend. If None, the local backend is used.
nproc (int): Number of processes for panoptic quality computing.
Defaults to 32. When `nproc` exceeds the number of cpu cores,
the number of cpu cores is used.
"""
if PQStat is None:
raise RuntimeError(
'panopticapi is not installed, please install it by: '
'pip install git+https://github.com/cocodataset/'
'panopticapi.git.')
cpu_num = min(nproc, multiprocessing.cpu_count())
annotations_split = np.array_split(matched_annotations_list, cpu_num)
print('Number of cores: {}, images per core: {}'.format(
cpu_num, len(annotations_split[0])))
workers = multiprocessing.Pool(processes=cpu_num)
processes = []
for proc_id, annotation_set in enumerate(annotations_split):
p = workers.apply_async(pq_compute_single_core,
(proc_id, annotation_set, gt_folder,
pred_folder, categories, backend_args))
processes.append(p)
# Close the process pool, otherwise it will lead to memory
# leaking problems.
workers.close()
workers.join()
pq_stat = PQStat()
for p in processes:
pq_stat += p.get()
return pq_stat
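# Illustrative usage sketch (paths and the matched annotation pairs are
# assumptions, not part of the original module):
#   matched = list(zip(gt_json['annotations'], pred_json['annotations']))
#   pq_stat = pq_compute_multi_core(
#       matched, 'path/to/gt_pngs', 'path/to/pred_pngs', categories, nproc=8)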
| 9,128 | 38.864629 | 79 |
py
|