repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_tood_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import TOODHead
def test_tood_head_loss():
    """Tests tood head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
initial_epoch=4,
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
    # the focal-loss configs below set activated=True (probability inputs),
    # which lets this test run on CPU, where the fused Focal Loss op is
    # unavailable
self = TOODHead(
num_classes=80,
in_channels=1,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
train_cfg=train_cfg,
test_cfg=test_cfg)
self.init_weights()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds = self(feat)
# test initial assigner and losses
self.epoch = 0
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
# test task alignment assigner and losses
self.epoch = 10
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
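    # Hedged sanity sketch (assuming TOODHead reads `initial_epoch` from
    # train_cfg, as configured above): with epoch=10 >= initial_epoch=4,
    # the TaskAlignedAssigner branch rather than the initial ATSSAssigner
    # produced the losses checked in this second round.
    assert self.epoch >= train_cfg.initial_epoch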
| 4,941 | 37.310078 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_solo_head.py
|
import pytest
import torch
from mmdet.models.dense_heads import (DecoupledSOLOHead,
DecoupledSOLOLightHead, SOLOHead)
def test_solo_head_loss():
"""Tests solo head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
self = SOLOHead(
num_classes=4,
in_channels=1,
num_grids=[40, 36, 24, 16, 12],
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
mask_preds, cls_preds = self.forward(feat)
# Test that empty ground truth encourages the network to
# predict background.
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_masks = [torch.empty((0, 550, 550))]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(
mask_preds,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
empty_mask_loss = empty_gt_losses['loss_mask']
empty_cls_loss = empty_gt_losses['loss_cls']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_mask_loss.item() == 0, (
'there should be no mask loss when there are no true masks')
    # When truth is non-empty then both cls and mask loss should be nonzero
    # for random inputs.
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
gt_masks = [(torch.rand((1, 256, 256)) > 0.5).float()]
one_gt_losses = self.loss(
mask_preds,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
onegt_mask_loss = one_gt_losses['loss_mask']
onegt_cls_loss = one_gt_losses['loss_cls']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# When the length of num_grids, scale_ranges, and num_levels are not equal.
with pytest.raises(AssertionError):
SOLOHead(
num_classes=4,
in_channels=1,
num_grids=[36, 24, 16, 12],
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
# When input feature length is not equal to num_levels.
with pytest.raises(AssertionError):
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32]
]
self.forward(feat)
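# Minimal illustrative helper (our own addition, not used by the original
# tests): all three SOLO variants in this file build one-image FPN-style
# inputs the same way, which a helper like this would capture.
def _dummy_feats(s, downsamples, channels=1):
    """Return one random feature map per level at resolution s // d."""
    return [
        torch.rand(1, channels, s // d, s // d) for d in downsamples
    ]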
def test_desolo_head_loss():
"""Tests solo head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
self = DecoupledSOLOHead(
num_classes=4,
in_channels=1,
num_grids=[40, 36, 24, 16, 12],
loss_mask=dict(
type='DiceLoss', use_sigmoid=True, activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
mask_preds_x, mask_preds_y, cls_preds = self.forward(feat)
# Test that empty ground truth encourages the network to
# predict background.
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_masks = [torch.empty((0, 550, 550))]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(
mask_preds_x,
mask_preds_y,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
empty_mask_loss = empty_gt_losses['loss_mask']
empty_cls_loss = empty_gt_losses['loss_cls']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_mask_loss.item() == 0, (
'there should be no mask loss when there are no true masks')
    # When truth is non-empty then both cls and mask loss should be nonzero
    # for random inputs.
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
gt_masks = [(torch.rand((1, 256, 256)) > 0.5).float()]
one_gt_losses = self.loss(
mask_preds_x,
mask_preds_y,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
onegt_mask_loss = one_gt_losses['loss_mask']
onegt_cls_loss = one_gt_losses['loss_cls']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# When the length of num_grids, scale_ranges, and num_levels are not equal.
with pytest.raises(AssertionError):
DecoupledSOLOHead(
num_classes=4,
in_channels=1,
num_grids=[36, 24, 16, 12],
loss_mask=dict(
type='DiceLoss',
use_sigmoid=True,
activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
# When input feature length is not equal to num_levels.
with pytest.raises(AssertionError):
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32]
]
self.forward(feat)
def test_desolo_light_head_loss():
"""Tests solo head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
self = DecoupledSOLOLightHead(
num_classes=4,
in_channels=1,
num_grids=[40, 36, 24, 16, 12],
loss_mask=dict(
type='DiceLoss', use_sigmoid=True, activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
mask_preds_x, mask_preds_y, cls_preds = self.forward(feat)
# Test that empty ground truth encourages the network to
# predict background.
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_masks = [torch.empty((0, 550, 550))]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(
mask_preds_x,
mask_preds_y,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
empty_mask_loss = empty_gt_losses['loss_mask']
empty_cls_loss = empty_gt_losses['loss_cls']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_mask_loss.item() == 0, (
'there should be no mask loss when there are no true masks')
    # When truth is non-empty then both cls and mask loss should be nonzero
    # for random inputs.
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
gt_masks = [(torch.rand((1, 256, 256)) > 0.5).float()]
one_gt_losses = self.loss(
mask_preds_x,
mask_preds_y,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
onegt_mask_loss = one_gt_losses['loss_mask']
onegt_cls_loss = one_gt_losses['loss_cls']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# When the length of num_grids, scale_ranges, and num_levels are not equal.
with pytest.raises(AssertionError):
DecoupledSOLOLightHead(
num_classes=4,
in_channels=1,
num_grids=[36, 24, 16, 12],
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
# When input feature length is not equal to num_levels.
with pytest.raises(AssertionError):
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32]
]
self.forward(feat)
| 9,519 | 32.403509 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_yolox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.dense_heads import YOLOXHead
def test_yolox_head_loss():
"""Tests yolox head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='SimOTAAssigner',
center_radius=2.5,
candidate_topk=10,
iou_weight=3.0,
cls_weight=1.0)))
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=False, train_cfg=train_cfg)
assert not self.use_l1
assert isinstance(self.multi_level_cls_convs[0][0], ConvModule)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, objectnesses = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas)
    # When there is no truth, the objectness loss should be nonzero but
    # there should be no cls or box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
assert empty_cls_loss.item() == 0, (
'there should be no cls loss when there are no true boxes')
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=True, train_cfg=train_cfg)
assert isinstance(self.multi_level_cls_convs[0][0],
DepthwiseSeparableConvModule)
self.use_l1 = True
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
onegt_l1_loss = one_gt_losses['loss_l1'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_obj_loss.item() > 0, 'obj loss should be non-zero'
assert onegt_l1_loss.item() > 0, 'l1 loss should be non-zero'
    # Test ground truth out of bounds
gt_bboxes = [torch.Tensor([[s * 4, s * 4, s * 4 + 10, s * 4 + 10]])]
gt_labels = [torch.LongTensor([2])]
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas)
# When gt_bboxes out of bound, the assign results should be empty,
# so the cls and bbox loss should be zero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
assert empty_cls_loss.item() == 0, (
'there should be no cls loss when gt_bboxes out of bound')
assert empty_box_loss.item() == 0, (
'there should be no box loss when gt_bboxes out of bound')
assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'
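    # Hedged note (our wording): `use_l1` is normally toggled on late in
    # YOLOX training by a mode-switch hook; flipping it manually above is
    # what made the extra 'loss_l1' term appear in the non-empty-gt losses.
    assert 'loss_l1' in one_gt_losses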
| 3,809 | 41.808989 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_autoassign_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_autoassign_head_loss():
"""Tests autoassign head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(assigner=None, allowed_border=-1, pos_weight=-1, debug=False))
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, objectnesses = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
    # When there is no truth, the neg (classification) loss should be nonzero
    # but there should be no pos or center loss.
empty_pos_loss = empty_gt_losses['loss_pos']
empty_neg_loss = empty_gt_losses['loss_neg']
empty_center_loss = empty_gt_losses['loss_center']
    assert empty_neg_loss.item() > 0, 'neg loss should be non-zero'
    assert empty_pos_loss.item() == 0, (
        'there should be no pos loss when there are no true boxes')
    assert empty_center_loss.item() == 0, (
        'there should be no center loss when there are no true boxes')
    # When truth is non-empty then the pos, neg and center losses should all
    # be nonzero for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_pos_loss = one_gt_losses['loss_pos']
onegt_neg_loss = one_gt_losses['loss_neg']
onegt_center_loss = one_gt_losses['loss_center']
    assert onegt_pos_loss.item() > 0, 'pos loss should be non-zero'
    assert onegt_neg_loss.item() > 0, 'neg loss should be non-zero'
    assert onegt_center_loss.item() > 0, 'center loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
strides=(4, ))
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
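    # Hedged recap of the levels_to_images contract exercised above: every
    # per-image tensor stacks all five levels' h * w locations along dim 0,
    # keeping the channel dimension c intact.
    assert all(r.size() == (h * w * 5, c) for r in results)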
| 3,580 | 37.923913 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_ld_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GFLHead, LDHead
def test_ld_head_loss():
"""Tests vfnet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = LDHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_ld=dict(type='KnowledgeDistillationKLDivLoss', loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]))
teacher_model = GFLHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds = self.forward(feat)
rand_soft_target = teacher_model.forward(feat)[1]
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero, ld loss should
# be non-negative but there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_ld_loss = sum(empty_gt_losses['loss_ld'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_ld_loss.item() >= 0, 'ld loss should be non-negative'
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
gt_bboxes_ignore = gt_bboxes
# When truth is non-empty but ignored then the cls loss should be nonzero,
# but there should be no box loss.
ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])
ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])
assert ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
assert ignore_box_loss.item() == 0, 'gt bbox ignored loss should be zero'
# When truth is non-empty and not ignored then both cls and box loss should
# be nonzero for random inputs
gt_bboxes_ignore = [torch.randn(1, 4)]
not_ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, rand_soft_target, img_metas,
gt_bboxes_ignore)
not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])
not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])
assert not_ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
assert not_ignore_box_loss.item(
) > 0, 'gt bbox not ignored loss should be non-zero'
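    # Hedged recap (our wording): the LD term distills the teacher's
    # per-edge distribution logits on assigned samples, which is why the
    # empty-gt case above only asserts that it is non-negative.
    assert sum(one_gt_losses['loss_ld']).item() >= 0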
| 4,605 | 36.754098 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_paa_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_paa_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
            components = np.zeros_like(loss, dtype=np.int64)
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
paa_head.skm = mock_skm()
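    # The stub above replaces sklearn.mixture (imported as `skm` inside
    # paa_head) so the GMM-based positive/negative sample split runs
    # without scikit-learn: predict() puts every sample in component 0 and
    # score_samples() returns random scores.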
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
    # use CrossEntropyLoss for classification since the CUDA Focal Loss op
    # is not supported on CPU
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, iou_preds = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
    assert empty_iou_loss.item() == 0, (
        'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
assert self.with_score_voting
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
| 4,800 | 34.301471 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_detr_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import DETRHead
def test_detr_head_loss():
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3),
'batch_input_shape': (s, s)
}]
config = ConfigDict(
dict(
type='DETRHead',
num_classes=80,
in_channels=200,
transformer=dict(
type='Transformer',
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=[
dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1)
],
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm')),
)),
positional_encoding=dict(
type='SinePositionalEncoding', num_feats=128, normalize=True),
loss_cls=dict(
type='CrossEntropyLoss',
bg_cls_weight=0.1,
use_sigmoid=False,
loss_weight=1.0,
class_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0)))
self = DETRHead(**config)
self.init_weights()
feat = [torch.rand(1, 200, 10, 10)]
cls_scores, bbox_preds = self.forward(feat, img_metas)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
assert loss.item() > 0, 'cls loss should be non-zero'
elif 'bbox' in key:
assert loss.item(
) == 0, 'there should be no box loss when there are no true boxes'
elif 'iou' in key:
assert loss.item(
) == 0, 'there should be no iou loss when there are no true boxes'
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
for loss in one_gt_losses.values():
assert loss.item(
) > 0, 'cls loss, or box loss, or iou loss should be non-zero'
# test forward_train
self.forward_train(feat, img_metas, gt_bboxes, gt_labels)
# test inference mode
self.get_bboxes(cls_scores, bbox_preds, img_metas, rescale=True)
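    # Hedged note (our summary): with return_intermediate=True the head
    # emits one set of losses per decoder layer (keys like 'd0.loss_cls')
    # plus the final-layer losses, hence the dict-wide checks above.
    assert any('loss_cls' in key for key in one_gt_losses)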
| 4,130 | 38.342857 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_fcos_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import FCOSHead
def test_fcos_head_loss():
"""Tests fcos head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
    # use CrossEntropyLoss for classification since the CUDA Focal Loss op
    # is not supported on CPU
self = FCOSHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, centerness = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, centerness, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, centerness, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
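    # Hedged extra check (not in the original assertions): FCOSHead also
    # returns a centerness term alongside the cls and bbox losses.
    assert 'loss_centerness' in one_gt_losses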
| 2,406 | 36.030769 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_yolact_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
def test_yolact_head_loss():
"""Tests yolact head losses when truth is empty and non-empty."""
s = 550
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False,
min_gt_box_wh=[4.0, 4.0]))
bbox_head = YOLACTHead(
num_classes=80,
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=3,
scales_per_octave=1,
base_sizes=[8, 16, 32, 64, 128],
ratios=[0.5, 1.0, 2.0],
strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
centers=[(550 * 0.5 / x, 550 * 0.5 / x)
for x in [69, 35, 18, 9, 5]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
reduction='none',
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
num_head_convs=1,
num_protos=32,
use_ohem=True,
train_cfg=train_cfg)
segm_head = YOLACTSegmHead(
in_channels=256,
num_classes=80,
loss_segm=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
mask_head = YOLACTProtonet(
num_classes=80,
in_channels=256,
num_protos=32,
max_masks_to_train=100,
loss_mask_weight=6.125)
feat = [
torch.rand(1, 256, feat_size, feat_size)
for feat_size in [69, 35, 18, 9, 5]
]
cls_score, bbox_pred, coeff_pred = bbox_head.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_masks = [torch.empty((0, 550, 550))]
gt_bboxes_ignore = None
empty_gt_losses, sampling_results = bbox_head.loss(
cls_score,
bbox_pred,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# Test segm head and mask head
segm_head_outs = segm_head(feat[0])
empty_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
empty_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas,
sampling_results)
# When there is no truth, the segm and mask loss should be zero.
empty_segm_loss = sum(empty_segm_loss['loss_segm'])
empty_mask_loss = sum(empty_mask_loss['loss_mask'])
assert empty_segm_loss.item() == 0, (
'there should be no segm loss when there are no true boxes')
assert empty_mask_loss == 0, (
'there should be no mask loss when there are no true boxes')
# When truth is non-empty then cls, box, mask, segm loss should be
# nonzero for random inputs.
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
gt_masks = [(torch.rand((1, 550, 550)) > 0.5).float()]
one_gt_losses, sampling_results = bbox_head.loss(
cls_score,
bbox_pred,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=gt_bboxes_ignore)
one_gt_cls_loss = sum(one_gt_losses['loss_cls'])
one_gt_box_loss = sum(one_gt_losses['loss_bbox'])
assert one_gt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert one_gt_box_loss.item() > 0, 'box loss should be non-zero'
one_gt_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
one_gt_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
one_gt_segm_loss = sum(one_gt_segm_loss['loss_segm'])
one_gt_mask_loss = sum(one_gt_mask_loss['loss_mask'])
assert one_gt_segm_loss.item() > 0, 'segm loss should be non-zero'
assert one_gt_mask_loss.item() > 0, 'mask loss should be non-zero'
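    # Hedged recap (our wording): unlike most heads in this suite,
    # YOLACTHead.loss also returns the OHEM sampling results, which the
    # protonet reuses above so mask coefficients stay aligned with the
    # sampled positive boxes.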
| 5,247 | 37.028986 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_sabl_retina_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import SABLRetinaHead
def test_sabl_retina_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
head = SABLRetinaHead(
num_classes=4,
in_channels=3,
feat_channels=10,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
train_cfg=cfg)
    if not torch.cuda.is_available():
        import pytest
        pytest.skip('test requires GPU and torch+cuda')
    head.cuda()
    # Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 3, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.approx_anchor_generator.base_anchors))
]
cls_scores, bbox_preds = head.forward(feat)
# Test that empty ground truth encourages the network
# to predict background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls'])
empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_cls_loss.item() == 0, (
'there should be no box cls loss when there are no true boxes')
assert empty_box_reg_loss.item() == 0, (
'there should be no box reg loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should
# be nonzero for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls'])
onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_cls_loss.item() > 0, 'box loss cls should be non-zero'
assert onegt_box_reg_loss.item() > 0, 'box loss reg should be non-zero'
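    # Hedged note (our wording): SABL splits box supervision into a bucket
    # classification term ('loss_bbox_cls') and a within-bucket offset
    # regression term ('loss_bbox_reg'), hence the two box losses above.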
| 3,080 | 39.012987 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_atss_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import ATSSHead
def test_atss_head_loss():
"""Tests atss head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = ATSSHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, centernesses = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, centernesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_centerness_loss.item() == 0, (
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, centernesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_centerness_loss.item() > 0, (
'centerness loss should be non-zero')
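    # Hedged note (our wording): each value in the loss dict here is a
    # per-level sequence, one entry per FPN stride, which is why the
    # assertions sum the entries before calling .item().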
| 2,949 | 36.820513 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_dense_heads/test_gfl_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GFLHead
def test_gfl_head_loss():
"""Tests gfl head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = GFLHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_dfl_loss = sum(empty_gt_losses['loss_dfl'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_dfl_loss.item() == 0, (
'there should be no dfl loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_dfl_loss = sum(one_gt_losses['loss_dfl'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_dfl_loss.item() > 0, 'dfl loss should be non-zero'
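    # Hedged recap (our wording): GFL's Distribution Focal Loss supervises
    # the discretized box-side distributions, so like the GIoU term it only
    # receives gradients from positive anchors, matching the checks above.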
| 2,786 | 36.16 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_roi_heads/test_roi_extractor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.roi_heads.roi_extractors import GenericRoIExtractor
def test_groie():
# test with pre/post
cfg = dict(
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False))
groie = GenericRoIExtractor(**cfg)
feats = (
torch.rand((1, 256, 200, 336)),
torch.rand((1, 256, 100, 168)),
torch.rand((1, 256, 50, 84)),
torch.rand((1, 256, 25, 42)),
)
rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
res = groie(feats, rois)
assert res.shape == torch.Size([1, 256, 7, 7])
# test w.o. pre/post
cfg = dict(
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32])
groie = GenericRoIExtractor(**cfg)
feats = (
torch.rand((1, 256, 200, 336)),
torch.rand((1, 256, 100, 168)),
torch.rand((1, 256, 50, 84)),
torch.rand((1, 256, 25, 42)),
)
rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
res = groie(feats, rois)
assert res.shape == torch.Size([1, 256, 7, 7])
# test w.o. pre/post concat
cfg = dict(
aggregation='concat',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256 * 4,
featmap_strides=[4, 8, 16, 32])
groie = GenericRoIExtractor(**cfg)
feats = (
torch.rand((1, 256, 200, 336)),
torch.rand((1, 256, 100, 168)),
torch.rand((1, 256, 50, 84)),
torch.rand((1, 256, 25, 42)),
)
rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
res = groie(feats, rois)
assert res.shape == torch.Size([1, 1024, 7, 7])
    # test unsupported aggregation method
with pytest.raises(AssertionError):
cfg = dict(
aggregation='not support',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=1024,
featmap_strides=[4, 8, 16, 32])
_ = GenericRoIExtractor(**cfg)
# test concat channels number
cfg = dict(
aggregation='concat',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256 * 5, # 256*5 != 256*4
featmap_strides=[4, 8, 16, 32])
groie = GenericRoIExtractor(**cfg)
feats = (
torch.rand((1, 256, 200, 336)),
torch.rand((1, 256, 100, 168)),
torch.rand((1, 256, 50, 84)),
torch.rand((1, 256, 25, 42)),
)
rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
    # out_channels does not equal the sum of feat channels
with pytest.raises(AssertionError):
_ = groie(feats, rois)
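    # Hedged note (our wording): rois follow the (batch_idx, x1, y1, x2, y2)
    # layout, so the single-image batches above keep column 0 at 0.0.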
| 3,257 | 27.330435 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_roi_heads/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import build_assigner, build_sampler
def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels):
"""Create sample results that can be passed to BBoxHead.get_targets."""
num_imgs = 1
feat = torch.rand(1, 1, 3, 3)
assign_config = dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1)
sampler_config = dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True)
bbox_assigner = build_assigner(assign_config)
bbox_sampler = build_sampler(sampler_config)
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i],
gt_bboxes_ignore[i], gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=feat)
sampling_results.append(sampling_result)
return sampling_results
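# Hedged usage sketch (mirrors how the bbox/mask head tests call this):
#     proposals = [torch.Tensor([[23.7, 23.9, 228.6, 153.9]])]
#     gts = [torch.Tensor([[23.7, 23.9, 238.6, 151.9]])]
#     labels = [torch.LongTensor([2])]
#     sampling_results = _dummy_bbox_sampling(proposals, gts, labels)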
| 1,249 | 31.051282 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_roi_heads/test_mask_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.roi_heads.mask_heads import (DynamicMaskHead, FCNMaskHead,
MaskIoUHead)
from .utils import _dummy_bbox_sampling
def test_mask_head_loss():
"""Test mask head loss when mask target is empty."""
self = FCNMaskHead(
num_convs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
num_classes=8)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
# create dummy mask
import numpy as np
from mmdet.core import BitmapMasks
dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
# create dummy train_cfg
train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8, 6, 6)
mask_pred = self.forward(dummy_feats)
mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
onegt_mask_loss = sum(loss_mask['loss_mask'])
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# test mask_iou_head
mask_iou_head = MaskIoUHead(
num_convs=1,
num_fcs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
fc_out_channels=8,
num_classes=8)
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
pos_mask_pred, mask_targets,
train_cfg)
loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
assert onegt_mask_iou_loss.item() >= 0
# test dynamic_mask_head
dummy_proposal_feats = torch.rand(num_sampled, 8)
dynamic_mask_head = DynamicMaskHead(
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=8,
feat_channels=8,
out_channels=8,
input_feat_shape=6,
with_proj=False,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
num_convs=1,
num_classes=8,
in_channels=8,
roi_feat_size=6)
mask_pred = dynamic_mask_head(dummy_feats, dummy_proposal_feats)
mask_target = dynamic_mask_head.get_targets(sampling_results, gt_masks,
train_cfg)
loss_mask = dynamic_mask_head.loss(mask_pred, mask_target, pos_labels)
loss_mask = loss_mask['loss_mask'].sum()
assert loss_mask.item() >= 0
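    # Hedged note (our wording): DynamicMaskHead generates its conv kernels
    # from the per-proposal embeddings (dummy_proposal_feats above), so its
    # forward needs both roi features and proposal features.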
| 3,430 | 34.371134 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_roi_heads/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import _dummy_bbox_sampling
__all__ = ['_dummy_bbox_sampling']
| 124 | 24 | 47 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_roi_heads/test_sabl_bbox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.core import bbox2roi
from mmdet.models.roi_heads.bbox_heads import SABLHead
from .utils import _dummy_bbox_sampling
def test_sabl_bbox_head_loss():
"""Tests bbox head loss when truth is empty and non-empty."""
self = SABLHead(
num_classes=4,
cls_in_channels=3,
reg_in_channels=3,
cls_out_channels=3,
reg_offset_out_channels=3,
reg_cls_out_channels=3,
roi_feat_size=7)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
target_cfg = mmcv.Config(dict(pos_weight=1))
# Test bbox loss when truth is empty
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
rois = bbox2roi([res.bboxes for res in sampling_results])
dummy_feats = torch.rand(num_sampled, 3, 7, 7)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
assert losses.get('loss_bbox_cls',
0) == 0, 'empty gt bbox-cls-loss should be zero'
assert losses.get('loss_bbox_reg',
0) == 0, 'empty gt bbox-reg-loss should be zero'
# Test bbox loss when truth is non-empty
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 3, 7, 7)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
    assert losses.get('loss_bbox_cls',
                      0) > 0, 'non-empty gt bbox-cls-loss should be non-zero'
    assert losses.get('loss_bbox_reg',
                      0) > 0, 'non-empty gt bbox-reg-loss should be non-zero'
| 2,979 | 37.205128 | 75 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_models/test_roi_heads/test_bbox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pytest
import torch
from mmdet.core import bbox2roi
from mmdet.models.roi_heads.bbox_heads import BBoxHead
from .utils import _dummy_bbox_sampling
def test_bbox_head_loss():
"""Tests bbox head loss when truth is empty and non-empty."""
self = BBoxHead(in_channels=8, roi_feat_size=3)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
target_cfg = mmcv.Config(dict(pos_weight=1))
# Test bbox loss when truth is empty
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
rois = bbox2roi([res.bboxes for res in sampling_results])
dummy_feats = torch.rand(num_sampled, 8 * 3 * 3)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
assert losses.get('loss_bbox', 0) == 0, 'empty gt loss should be zero'
# Test bbox loss when truth is non-empty
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8 * 3 * 3)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
assert losses.get('loss_bbox', 0) > 0, 'box-loss should be non-zero'
@pytest.mark.parametrize('num_sample', [0, 1, 2])
def test_bbox_head_get_bboxes(num_sample):
self = BBoxHead(reg_class_agnostic=True)
num_class = 6
rois = torch.rand((num_sample, 5))
cls_score = torch.rand((num_sample, num_class))
bbox_pred = torch.rand((num_sample, 4))
scale_factor = np.array([2.0, 2.0, 2.0, 2.0])
det_bboxes, det_labels = self.get_bboxes(
rois, cls_score, bbox_pred, None, scale_factor, rescale=True)
if num_sample == 0:
assert len(det_bboxes) == 0 and len(det_labels) == 0
else:
assert det_bboxes.shape == bbox_pred.shape
assert det_labels.shape == cls_score.shape
def test_refine_boxes():
"""Mirrors the doctest in
``mmdet.models.bbox_heads.bbox_head.BBoxHead.refine_boxes`` but checks for
multiple values of n_roi / n_img."""
self = BBoxHead(reg_class_agnostic=True)
test_settings = [
# Corner case: less rois than images
{
'n_roi': 2,
'n_img': 4,
'rng': 34285940
},
# Corner case: no images
{
'n_roi': 0,
'n_img': 0,
'rng': 52925222
},
# Corner cases: few images / rois
{
'n_roi': 1,
'n_img': 1,
'rng': 1200281
},
{
'n_roi': 2,
'n_img': 1,
'rng': 1200282
},
{
'n_roi': 2,
'n_img': 2,
'rng': 1200283
},
{
'n_roi': 1,
'n_img': 2,
'rng': 1200284
},
# Corner case: no rois few images
{
'n_roi': 0,
'n_img': 1,
'rng': 23955860
},
{
'n_roi': 0,
'n_img': 2,
'rng': 25830516
},
# Corner case: no rois many images
{
'n_roi': 0,
'n_img': 10,
'rng': 671346
},
{
'n_roi': 0,
'n_img': 20,
'rng': 699807
},
# Corner case: cal_similarity num rois and images
{
'n_roi': 20,
'n_img': 20,
'rng': 1200238
},
{
'n_roi': 10,
'n_img': 20,
'rng': 1200238
},
{
'n_roi': 5,
'n_img': 5,
'rng': 1200238
},
# ----------------------------------
# Common case: more rois than images
{
'n_roi': 100,
'n_img': 1,
'rng': 337156
},
{
'n_roi': 150,
'n_img': 2,
'rng': 275898
},
{
'n_roi': 500,
'n_img': 5,
'rng': 4903221
},
]
for demokw in test_settings:
try:
n_roi = demokw['n_roi']
n_img = demokw['n_img']
rng = demokw['rng']
print(f'Test refine_boxes case: {demokw!r}')
tup = _demodata_refine_boxes(n_roi, n_img, rng=rng)
rois, labels, bbox_preds, pos_is_gts, img_metas = tup
bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
pos_is_gts, img_metas)
assert len(bboxes_list) == n_img
assert sum(map(len, bboxes_list)) <= n_roi
assert all(b.shape[1] == 4 for b in bboxes_list)
except Exception:
print(f'Test failed with demokw={demokw!r}')
raise
def _demodata_refine_boxes(n_roi, n_img, rng=0):
"""Create random test data for the
``mmdet.models.bbox_heads.bbox_head.BBoxHead.refine_boxes`` method."""
import numpy as np
from mmdet.core.bbox.demodata import random_boxes
from mmdet.core.bbox.demodata import ensure_rng
try:
import kwarray
except ImportError:
import pytest
pytest.skip('kwarray is required for this test')
scale = 512
rng = ensure_rng(rng)
img_metas = [{'img_shape': (scale, scale)} for _ in range(n_img)]
# Create rois in the expected format
roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
if n_img == 0:
assert n_roi == 0, 'cannot have any rois if there are no images'
img_ids = torch.empty((0, ), dtype=torch.long)
roi_boxes = torch.empty((0, 4), dtype=torch.float32)
else:
img_ids = rng.randint(0, n_img, (n_roi, ))
img_ids = torch.from_numpy(img_ids)
rois = torch.cat([img_ids[:, None].float(), roi_boxes], dim=1)
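    # rois follow the (batch_index, x1, y1, x2, y2) layout that
    # refine_bboxes expects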
# Create other args
labels = rng.randint(0, 2, (n_roi, ))
labels = torch.from_numpy(labels).long()
bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
# For each image, pretend random positive boxes are gts
    is_label_pos = (labels.numpy() > 0).astype(int)  # np.int was removed in NumPy 1.24
lbl_per_img = kwarray.group_items(is_label_pos, img_ids.numpy())
pos_per_img = [sum(lbl_per_img.get(gid, [])) for gid in range(n_img)]
# randomly generate with numpy then sort with torch
_pos_is_gts = [
rng.randint(0, 2, (npos, )).astype(np.uint8) for npos in pos_per_img
]
pos_is_gts = [
torch.from_numpy(p).sort(descending=True)[0] for p in _pos_is_gts
]
return rois, labels, bbox_preds, pos_is_gts, img_metas
| 7,949 | 30.547619 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_onnx/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import warnings
import numpy as np
import onnx
import onnxruntime as ort
import torch
import torch.nn as nn
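# Look up mmcv's prebuilt onnxruntime custom-op library; it is only
# available when mmcv was compiled with ONNXRuntime support.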
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
    warnings.warn('If input model has custom op from mmcv, '
                  'you may have to build mmcv with ONNXRuntime from source.')
class WrapFunction(nn.Module):
"""Wrap the function to be tested for torch.onnx.export tracking."""
def __init__(self, wrapped_function):
super(WrapFunction, self).__init__()
self.wrapped_function = wrapped_function
def forward(self, *args, **kwargs):
return self.wrapped_function(*args, **kwargs)
def ort_validate(model, feats, onnx_io='tmp.onnx'):
"""Validate the output of the onnxruntime backend is the same as the output
generated by torch.
Args:
model (nn.Module | function): the function of model or model
to be verified.
feats (tuple(list(torch.Tensor)) | list(torch.Tensor) | torch.Tensor):
the input of model.
onnx_io (str): the name of onnx output file.
"""
# if model is not an instance of nn.Module, then it is a normal
# function and it should be wrapped.
if isinstance(model, nn.Module):
wrap_model = model
else:
wrap_model = WrapFunction(model)
wrap_model.cpu().eval()
with torch.no_grad():
torch.onnx.export(
wrap_model,
feats,
onnx_io,
export_params=True,
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=False,
opset_version=11)
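    # onnxruntime takes a flat list of input tensors, so a tuple of
    # feature lists is flattened into a single list below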
if isinstance(feats, tuple):
ort_feats = []
for feat in feats:
ort_feats += feat
else:
ort_feats = feats
# default model name: tmp.onnx
onnx_outputs = get_ort_model_output(ort_feats)
# remove temp file
if osp.exists(onnx_io):
os.remove(onnx_io)
if isinstance(feats, tuple):
torch_outputs = convert_result_list(wrap_model.forward(*feats))
else:
torch_outputs = convert_result_list(wrap_model.forward(feats))
torch_outputs = [
torch_output.detach().numpy() for torch_output in torch_outputs
]
# match torch_outputs and onnx_outputs
for i in range(len(onnx_outputs)):
np.testing.assert_allclose(
torch_outputs[i], onnx_outputs[i], rtol=1e-03, atol=1e-05)
def get_ort_model_output(feat, onnx_io='tmp.onnx'):
"""Run the model in onnxruntime env.
Args:
feat (list[Tensor]): A list of tensors from torch.rand,
            each is a 4D-tensor.
        onnx_io (str): the name of the onnx model file to load.
Returns:
list[np.array]: onnxruntime infer result, each is a np.array
"""
onnx_model = onnx.load(onnx_io)
onnx.checker.check_model(onnx_model)
session_options = ort.SessionOptions()
# register custom op for onnxruntime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_io, session_options)
if isinstance(feat, torch.Tensor):
onnx_outputs = sess.run(None,
{sess.get_inputs()[0].name: feat.numpy()})
else:
onnx_outputs = sess.run(None, {
sess.get_inputs()[i].name: feat[i].numpy()
for i in range(len(feat))
})
return onnx_outputs
def convert_result_list(outputs):
"""Convert the torch forward outputs containing tuple or list to a list
only containing torch.Tensor.
Args:
        outputs (list(Tensor) | tuple(list(Tensor)) | ...): the outputs
in torch env, maybe containing nested structures such as list
or tuple.
Returns:
list(Tensor): a list only containing torch.Tensor
"""
# recursive end condition
if isinstance(outputs, torch.Tensor):
return [outputs]
ret = []
for sub in outputs:
ret += convert_result_list(sub)
return ret
| 4,141 | 29.014493 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_onnx/test_neck.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import pytest
import torch
from mmdet import digit_version
from mmdet.models.necks import FPN, YOLOV3Neck
from .utils import ort_validate
if digit_version(torch.__version__) <= digit_version('1.5.0'):
pytest.skip(
        'ort backend does not support torch versions below 1.5.0',
allow_module_level=True)
# Control the returned model of fpn_neck_config()
fpn_test_step_names = {
'fpn_normal': 0,
'fpn_wo_extra_convs': 1,
'fpn_lateral_bns': 2,
'fpn_bilinear_upsample': 3,
'fpn_scale_factor': 4,
'fpn_extra_convs_inputs': 5,
'fpn_extra_convs_laterals': 6,
'fpn_extra_convs_outputs': 7,
}
# Control the returned model of yolo_neck_config()
yolo_test_step_names = {'yolo_normal': 0}
data_path = osp.join(osp.dirname(__file__), 'data')
def fpn_neck_config(test_step_name):
"""Return the class containing the corresponding attributes according to
the fpn_test_step_names."""
s = 64
in_channels = [8, 16, 32, 64]
feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8]
out_channels = 8
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
if (fpn_test_step_names[test_step_name] == 0):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs=True,
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 1):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs=False,
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 2):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs=True,
no_norm_on_lateral=False,
norm_cfg=dict(type='BN', requires_grad=True),
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 3):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs=True,
upsample_cfg=dict(mode='bilinear', align_corners=True),
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 4):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs=True,
upsample_cfg=dict(scale_factor=2),
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 5):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_input',
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 6):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_lateral',
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 7):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_output',
num_outs=5)
return fpn_model, feats
def yolo_neck_config(test_step_name):
"""Config yolov3 Neck."""
in_channels = [16, 8, 4]
out_channels = [8, 4, 2]
# The data of yolov3_neck.pkl contains a list of
# torch.Tensor, where each torch.Tensor is generated by
# torch.rand and each tensor size is:
# (1, 4, 64, 64), (1, 8, 32, 32), (1, 16, 16, 16).
yolov3_neck_data = 'yolov3_neck.pkl'
feats = mmcv.load(osp.join(data_path, yolov3_neck_data))
if (yolo_test_step_names[test_step_name] == 0):
yolo_model = YOLOV3Neck(
in_channels=in_channels, out_channels=out_channels, num_scales=3)
return yolo_model, feats
def test_fpn_normal():
outs = fpn_neck_config('fpn_normal')
ort_validate(*outs)
def test_fpn_wo_extra_convs():
outs = fpn_neck_config('fpn_wo_extra_convs')
ort_validate(*outs)
def test_fpn_lateral_bns():
outs = fpn_neck_config('fpn_lateral_bns')
ort_validate(*outs)
def test_fpn_bilinear_upsample():
outs = fpn_neck_config('fpn_bilinear_upsample')
ort_validate(*outs)
def test_fpn_scale_factor():
outs = fpn_neck_config('fpn_scale_factor')
ort_validate(*outs)
def test_fpn_extra_convs_inputs():
outs = fpn_neck_config('fpn_extra_convs_inputs')
ort_validate(*outs)
def test_fpn_extra_convs_laterals():
outs = fpn_neck_config('fpn_extra_convs_laterals')
ort_validate(*outs)
def test_fpn_extra_convs_outputs():
outs = fpn_neck_config('fpn_extra_convs_outputs')
ort_validate(*outs)
def test_yolo_normal():
outs = yolo_neck_config('yolo_normal')
ort_validate(*outs)
| 4,808 | 28.323171 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_onnx/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import ort_validate
__all__ = ['ort_validate']
| 108 | 20.8 | 47 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_onnx/test_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from functools import partial
import mmcv
import numpy as np
import pytest
import torch
from mmcv.cnn import Scale
from mmdet import digit_version
from mmdet.models import build_detector
from mmdet.models.dense_heads import (FCOSHead, FSAFHead, RetinaHead, SSDHead,
YOLOV3Head)
from .utils import ort_validate
data_path = osp.join(osp.dirname(__file__), 'data')
if digit_version(torch.__version__) <= digit_version('1.5.0'):
pytest.skip(
        'ort backend does not support torch versions below 1.5.0',
allow_module_level=True)
def test_cascade_onnx_export():
config_path = './configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
cfg = mmcv.Config.fromfile(config_path)
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
with torch.no_grad():
model.forward = partial(model.forward, img_metas=[[dict()]])
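        # bind dummy img_metas via partial so that the traced forward only
        # takes the image tensor as input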
dynamic_axes = {
'input_img': {
0: 'batch',
2: 'width',
3: 'height'
},
'dets': {
0: 'batch',
1: 'num_dets',
},
'labels': {
0: 'batch',
1: 'num_dets',
},
}
torch.onnx.export(
model, [torch.rand(1, 3, 400, 500)],
'tmp.onnx',
output_names=['dets', 'labels'],
input_names=['input_img'],
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=False,
opset_version=11,
dynamic_axes=dynamic_axes)
def test_faster_onnx_export():
config_path = './configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
cfg = mmcv.Config.fromfile(config_path)
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
with torch.no_grad():
model.forward = partial(model.forward, img_metas=[[dict()]])
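        # as in the cascade test above, bind dummy img_metas so that only
        # the image tensor is traced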
dynamic_axes = {
'input_img': {
0: 'batch',
2: 'width',
3: 'height'
},
'dets': {
0: 'batch',
1: 'num_dets',
},
'labels': {
0: 'batch',
1: 'num_dets',
},
}
torch.onnx.export(
model, [torch.rand(1, 3, 400, 500)],
'tmp.onnx',
output_names=['dets', 'labels'],
input_names=['input_img'],
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=False,
opset_version=11,
dynamic_axes=dynamic_axes)
def retinanet_config():
"""RetinanNet Head Config."""
head_cfg = dict(
stacked_convs=6,
feat_channels=2,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]))
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
model = RetinaHead(
num_classes=4, in_channels=1, test_cfg=test_cfg, **head_cfg)
model.requires_grad_(False)
return model
def test_retina_head_forward_single():
"""Test RetinaNet Head single forward in torch and onnxruntime env."""
retina_model = retinanet_config()
feat = torch.rand(1, retina_model.in_channels, 32, 32)
# validate the result between the torch and ort
ort_validate(retina_model.forward_single, feat)
def test_retina_head_forward():
"""Test RetinaNet Head forward in torch and onnxruntime env."""
retina_model = retinanet_config()
s = 128
    # RetinaNet head expects multiple levels of features per image
feats = [
torch.rand(1, retina_model.in_channels, s // (2**(i + 2)),
s // (2**(i + 2))) # [32, 16, 8, 4, 2]
for i in range(len(retina_model.prior_generator.strides))
]
ort_validate(retina_model.forward, feats)
def test_retinanet_head_onnx_export():
"""Test RetinaNet Head _get_bboxes() in torch and onnxruntime env."""
retina_model = retinanet_config()
s = 128
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 2)
}]
# The data of retina_head_get_bboxes.pkl contains two parts:
# cls_score(list(Tensor)) and bboxes(list(Tensor)),
# where each torch.Tensor is generated by torch.rand().
# the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
# the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
retina_head_data = 'retina_head_get_bboxes.pkl'
feats = mmcv.load(osp.join(data_path, retina_head_data))
cls_score = feats[:5]
bboxes = feats[5:]
retina_model.onnx_export = partial(
retina_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(retina_model.onnx_export, (cls_score, bboxes))
def yolo_config():
"""YoloV3 Head Config."""
head_cfg = dict(
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'))
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
model = YOLOV3Head(
num_classes=4,
in_channels=[1, 1, 1],
out_channels=[16, 8, 4],
test_cfg=test_cfg,
**head_cfg)
model.requires_grad_(False)
# yolov3 need eval()
model.cpu().eval()
return model
def test_yolov3_head_forward():
"""Test Yolov3 head forward() in torch and ort env."""
yolo_model = yolo_config()
    # YOLOv3 head expects multiple levels of features per image
feats = [
torch.rand(1, 1, 64 // (2**(i + 2)), 64 // (2**(i + 2)))
for i in range(len(yolo_model.in_channels))
]
ort_validate(yolo_model.forward, feats)
def test_yolov3_head_onnx_export():
"""Test yolov3 head get_bboxes() in torch and ort env."""
yolo_model = yolo_config()
s = 128
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'img_shape': (s, s, 3),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3)
}]
# The data of yolov3_head_get_bboxes.pkl contains
# a list of torch.Tensor, where each torch.Tensor
# is generated by torch.rand and each tensor size is:
# (1, 27, 32, 32), (1, 27, 16, 16), (1, 27, 8, 8).
yolo_head_data = 'yolov3_head_get_bboxes.pkl'
pred_maps = mmcv.load(osp.join(data_path, yolo_head_data))
yolo_model.onnx_export = partial(
yolo_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(yolo_model.onnx_export, pred_maps)
def fcos_config():
"""FCOS Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
model = FCOSHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
return model
def test_fcos_head_forward_single():
"""Test fcos forward single in torch and ort env."""
fcos_model = fcos_config()
feat = torch.rand(1, fcos_model.in_channels, 32, 32)
fcos_model.forward_single = partial(
fcos_model.forward_single,
scale=Scale(1.0).requires_grad_(False),
stride=(4, ))
ort_validate(fcos_model.forward_single, feat)
def test_fcos_head_forward():
"""Test fcos forward in mutil-level feature map."""
fcos_model = fcos_config()
s = 128
feats = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
ort_validate(fcos_model.forward, feats)
def test_fcos_head_onnx_export():
"""Test fcos head get_bboxes() in ort."""
fcos_model = fcos_config()
s = 128
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'img_shape': (s, s, 3),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3)
}]
cls_scores = [
torch.rand(1, fcos_model.num_classes, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
bboxes = [
torch.rand(1, 4, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
centerness = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
fcos_model.onnx_export = partial(
fcos_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(fcos_model.onnx_export, (cls_scores, bboxes, centerness))
def fsaf_config():
"""FSAF Head Config."""
cfg = dict(
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=1,
scales_per_octave=1,
ratios=[1.0],
strides=[8, 16, 32, 64, 128]))
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
model = FSAFHead(num_classes=4, in_channels=1, test_cfg=test_cfg, **cfg)
model.requires_grad_(False)
return model
def test_fsaf_head_forward_single():
"""Test RetinaNet Head forward_single() in torch and onnxruntime env."""
fsaf_model = fsaf_config()
feat = torch.rand(1, fsaf_model.in_channels, 32, 32)
ort_validate(fsaf_model.forward_single, feat)
def test_fsaf_head_forward():
"""Test RetinaNet Head forward in torch and onnxruntime env."""
fsaf_model = fsaf_config()
s = 128
feats = [
torch.rand(1, fsaf_model.in_channels, s // (2**(i + 2)),
s // (2**(i + 2)))
for i in range(len(fsaf_model.anchor_generator.strides))
]
ort_validate(fsaf_model.forward, feats)
def test_fsaf_head_onnx_export():
"""Test RetinaNet Head get_bboxes in torch and onnxruntime env."""
fsaf_model = fsaf_config()
s = 256
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 2)
}]
# The data of fsaf_head_get_bboxes.pkl contains two parts:
# cls_score(list(Tensor)) and bboxes(list(Tensor)),
# where each torch.Tensor is generated by torch.rand().
# the cls_score's size: (1, 4, 64, 64), (1, 4, 32, 32),
# (1, 4, 16, 16), (1, 4, 8, 8), (1, 4, 4, 4).
# the bboxes's size: (1, 4, 64, 64), (1, 4, 32, 32),
# (1, 4, 16, 16), (1, 4, 8, 8), (1, 4, 4, 4).
fsaf_head_data = 'fsaf_head_get_bboxes.pkl'
feats = mmcv.load(osp.join(data_path, fsaf_head_data))
cls_score = feats[:5]
bboxes = feats[5:]
fsaf_model.onnx_export = partial(
fsaf_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(fsaf_model.onnx_export, (cls_score, bboxes))
def ssd_config():
"""SSD Head Config."""
cfg = dict(
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]))
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200))
model = SSDHead(
num_classes=4,
in_channels=(4, 8, 4, 2, 2, 2),
test_cfg=test_cfg,
**cfg)
model.requires_grad_(False)
return model
def test_ssd_head_forward():
"""Test SSD Head forward in torch and onnxruntime env."""
ssd_model = ssd_config()
    # feature map sizes produced by SSD300's strides [8, 16, 32, 64, 100, 300]
    featmap_size = [38, 19, 10, 5, 3, 1]
feats = [
torch.rand(1, ssd_model.in_channels[i], featmap_size[i],
featmap_size[i]) for i in range(len(ssd_model.in_channels))
]
ort_validate(ssd_model.forward, feats)
def test_ssd_head_onnx_export():
"""Test SSD Head get_bboxes in torch and onnxruntime env."""
ssd_model = ssd_config()
s = 300
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 2)
}]
# The data of ssd_head_get_bboxes.pkl contains two parts:
# cls_score(list(Tensor)) and bboxes(list(Tensor)),
# where each torch.Tensor is generated by torch.rand().
# the cls_score's size: (1, 20, 38, 38), (1, 30, 19, 19),
# (1, 30, 10, 10), (1, 30, 5, 5), (1, 20, 3, 3), (1, 20, 1, 1).
# the bboxes's size: (1, 16, 38, 38), (1, 24, 19, 19),
# (1, 24, 10, 10), (1, 24, 5, 5), (1, 16, 3, 3), (1, 16, 1, 1).
ssd_head_data = 'ssd_head_get_bboxes.pkl'
feats = mmcv.load(osp.join(data_path, ssd_head_data))
cls_score = feats[:6]
bboxes = feats[6:]
ssd_model.onnx_export = partial(
ssd_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(ssd_model.onnx_export, (cls_score, bboxes))
| 14,104 | 30.068282 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.datasets import get_loading_pipeline, replace_ImageToTensor
def test_replace_ImageToTensor():
# with MultiScaleFlipAug
pipelines = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
with pytest.warns(UserWarning):
assert expected_pipelines == replace_ImageToTensor(pipelines)
# without MultiScaleFlipAug
pipelines = [
dict(type='LoadImageFromFile'),
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
]
with pytest.warns(UserWarning):
assert expected_pipelines == replace_ImageToTensor(pipelines)
def test_get_loading_pipeline():
pipelines = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
]
assert expected_pipelines == \
get_loading_pipeline(pipelines)
| 2,721 | 32.604938 | 70 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_datasets/test_xml_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.datasets import DATASETS
def test_xml_dataset():
dataconfig = {
'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
'img_prefix': 'data/VOCdevkit/VOC2007/',
'pipeline': [{
'type': 'LoadImageFromFile'
}]
}
XMLDataset = DATASETS.get('XMLDataset')
class XMLDatasetSubClass(XMLDataset):
CLASSES = None
    # get_ann_info and _filter_imgs of XMLDataset use self.CLASSES,
    # so a subclass whose CLASSES is None should fail the assertion
with pytest.raises(AssertionError):
XMLDatasetSubClass(**dataconfig)
| 641 | 25.75 | 69 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_datasets/test_common.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import logging
import os
import os.path as osp
import tempfile
from unittest.mock import MagicMock, patch
import mmcv
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.runner import EpochBasedRunner
from torch.utils.data import DataLoader
from mmdet.core.evaluation import DistEvalHook, EvalHook
from mmdet.datasets import DATASETS, CocoDataset, CustomDataset, build_dataset
def _create_dummy_coco_json(json_name):
image = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name.jpg',
}
annotation_1 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0,
}
annotation_2 = {
'id': 2,
'image_id': 0,
'category_id': 0,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0,
}
annotation_3 = {
'id': 3,
'image_id': 0,
'category_id': 0,
'area': 1600,
'bbox': [150, 160, 40, 40],
'iscrowd': 0,
}
annotation_4 = {
'id': 4,
'image_id': 0,
'category_id': 0,
'area': 10000,
'bbox': [250, 260, 100, 100],
'iscrowd': 0,
}
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
}]
fake_json = {
'images': [image],
'annotations':
[annotation_1, annotation_2, annotation_3, annotation_4],
'categories': categories
}
mmcv.dump(fake_json, json_name)
def _create_dummy_custom_pkl(pkl_name):
fake_pkl = [{
'filename': 'fake_name.jpg',
'width': 640,
'height': 640,
'ann': {
'bboxes':
np.array([[50, 60, 70, 80], [100, 120, 130, 150],
[150, 160, 190, 200], [250, 260, 350, 360]]),
'labels':
np.array([0, 0, 0, 0])
}
}]
mmcv.dump(fake_pkl, pkl_name)
def _create_dummy_results():
boxes = [
np.array([[50, 60, 70, 80, 1.0], [100, 120, 130, 150, 0.98],
[150, 160, 190, 200, 0.96], [250, 260, 350, 360, 0.95]])
]
return [boxes]
@pytest.mark.parametrize('config_path',
['./configs/_base_/datasets/voc0712.py'])
def test_dataset_init(config_path):
use_symlink = False
if not os.path.exists('./data'):
os.symlink('./tests/data', './data')
use_symlink = True
data_config = mmcv.Config.fromfile(config_path)
if 'data' not in data_config:
return
stage_names = ['train', 'val', 'test']
for stage_name in stage_names:
dataset_config = copy.deepcopy(data_config.data.get(stage_name))
dataset = build_dataset(dataset_config)
dataset[0]
if use_symlink:
os.unlink('./data')
def test_dataset_evaluation():
tmp_dir = tempfile.TemporaryDirectory()
# create dummy data
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_dummy_coco_json(fake_json_file)
# test single coco dataset evaluation
coco_dataset = CocoDataset(
ann_file=fake_json_file, classes=('car', ), pipeline=[])
fake_results = _create_dummy_results()
eval_results = coco_dataset.evaluate(fake_results, classwise=True)
assert eval_results['bbox_mAP'] == 1
assert eval_results['bbox_mAP_50'] == 1
assert eval_results['bbox_mAP_75'] == 1
# test concat dataset evaluation
fake_concat_results = _create_dummy_results() + _create_dummy_results()
# build concat dataset through two config dict
coco_cfg = dict(
type='CocoDataset',
ann_file=fake_json_file,
classes=('car', ),
pipeline=[])
concat_cfgs = [coco_cfg, coco_cfg]
concat_dataset = build_dataset(concat_cfgs)
eval_results = concat_dataset.evaluate(fake_concat_results)
assert eval_results['0_bbox_mAP'] == 1
assert eval_results['0_bbox_mAP_50'] == 1
assert eval_results['0_bbox_mAP_75'] == 1
assert eval_results['1_bbox_mAP'] == 1
assert eval_results['1_bbox_mAP_50'] == 1
assert eval_results['1_bbox_mAP_75'] == 1
# build concat dataset through concatenated ann_file
coco_cfg = dict(
type='CocoDataset',
ann_file=[fake_json_file, fake_json_file],
classes=('car', ),
pipeline=[])
concat_dataset = build_dataset(coco_cfg)
eval_results = concat_dataset.evaluate(fake_concat_results)
assert eval_results['0_bbox_mAP'] == 1
assert eval_results['0_bbox_mAP_50'] == 1
assert eval_results['0_bbox_mAP_75'] == 1
assert eval_results['1_bbox_mAP'] == 1
assert eval_results['1_bbox_mAP_50'] == 1
assert eval_results['1_bbox_mAP_75'] == 1
# create dummy data
fake_pkl_file = osp.join(tmp_dir.name, 'fake_data.pkl')
_create_dummy_custom_pkl(fake_pkl_file)
# test single custom dataset evaluation
custom_dataset = CustomDataset(
ann_file=fake_pkl_file, classes=('car', ), pipeline=[])
fake_results = _create_dummy_results()
eval_results = custom_dataset.evaluate(fake_results)
assert eval_results['mAP'] == 1
# test concat dataset evaluation
fake_concat_results = _create_dummy_results() + _create_dummy_results()
# build concat dataset through two config dict
custom_cfg = dict(
type='CustomDataset',
ann_file=fake_pkl_file,
classes=('car', ),
pipeline=[])
concat_cfgs = [custom_cfg, custom_cfg]
concat_dataset = build_dataset(concat_cfgs)
eval_results = concat_dataset.evaluate(fake_concat_results)
assert eval_results['0_mAP'] == 1
assert eval_results['1_mAP'] == 1
# build concat dataset through concatenated ann_file
concat_cfg = dict(
type='CustomDataset',
ann_file=[fake_pkl_file, fake_pkl_file],
classes=('car', ),
pipeline=[])
concat_dataset = build_dataset(concat_cfg)
eval_results = concat_dataset.evaluate(fake_concat_results)
assert eval_results['0_mAP'] == 1
assert eval_results['1_mAP'] == 1
# build concat dataset through explicit type
concat_cfg = dict(
type='ConcatDataset',
datasets=[custom_cfg, custom_cfg],
separate_eval=False)
concat_dataset = build_dataset(concat_cfg)
eval_results = concat_dataset.evaluate(fake_concat_results, metric='mAP')
assert eval_results['mAP'] == 1
assert len(concat_dataset.datasets[0].data_infos) == \
len(concat_dataset.datasets[1].data_infos)
assert len(concat_dataset.datasets[0].data_infos) == 1
tmp_dir.cleanup()
@patch('mmdet.apis.single_gpu_test', MagicMock)
@patch('mmdet.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', (EvalHook, DistEvalHook))
def test_evaluation_hook(EvalHookParam):
# create dummy data
dataloader = DataLoader(torch.ones((5, 2)))
# 0.1. dataloader is not a DataLoader object
with pytest.raises(TypeError):
EvalHookParam(dataloader=MagicMock(), interval=-1)
# 0.2. negative interval
with pytest.raises(ValueError):
EvalHookParam(dataloader, interval=-1)
# 1. start=None, interval=1: perform evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, interval=1)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2
# 2. start=1, interval=1: perform evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, start=1, interval=1)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2
# 3. start=None, interval=2: perform evaluation after epoch 2, 4, 6, etc
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, interval=2)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 1 # after epoch 2
# 4. start=1, interval=2: perform evaluation after epoch 1, 3, 5, etc
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, start=1, interval=2)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 3
# 5. start=0/negative, interval=1: perform evaluation after each epoch and
# before epoch 1.
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, start=0)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2
    # 6. start=0, interval=2, dynamic_intervals=[(3, 1)]: the evaluation
    # interval is 2 before epoch 3 and 1 afterwards.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=0, interval=2, dynamic_intervals=[(3, 1)])
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 4)
assert evalhook.evaluate.call_count == 3
# the evaluation start epoch cannot be less than 0
runner = _build_demo_runner()
with pytest.raises(ValueError):
EvalHookParam(dataloader, start=-2)
evalhook = EvalHookParam(dataloader, start=0)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2
    # 7. resuming from epoch i, start = x (x <= i), interval = 1: perform
    # evaluation after each epoch and before the first epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, start=1)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner._epoch = 2
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # before & after epoch 3
    # 8. resuming from epoch i, start = i+1/None, interval = 1: perform
    # evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, start=2)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner._epoch = 1
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # after epoch 2 & 3
def _build_demo_runner():
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
model = Model()
tmp_dir = tempfile.mkdtemp()
runner = EpochBasedRunner(
model=model, work_dir=tmp_dir, logger=logging.getLogger())
return runner
@pytest.mark.parametrize('classes, expected_length', [(['bus'], 2),
(['car'], 1),
(['bus', 'car'], 2)])
def test_allow_empty_images(classes, expected_length):
dataset_class = DATASETS.get('CocoDataset')
# Filter empty images
filtered_dataset = dataset_class(
ann_file='tests/data/coco_sample.json',
img_prefix='tests/data',
pipeline=[],
classes=classes,
filter_empty_gt=True)
# Get all
full_dataset = dataset_class(
ann_file='tests/data/coco_sample.json',
img_prefix='tests/data',
pipeline=[],
classes=classes,
filter_empty_gt=False)
assert len(filtered_dataset) == expected_length
assert len(filtered_dataset.img_ids) == expected_length
assert len(full_dataset) == 3
assert len(full_dataset.img_ids) == 3
assert filtered_dataset.CLASSES == classes
assert full_dataset.CLASSES == classes
| 12,280 | 31.749333 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_datasets/test_coco_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmcv
import pytest
from mmdet.datasets import CocoDataset
def _create_ids_error_coco_json(json_name):
image = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name.jpg',
}
annotation_1 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0,
}
annotation_2 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0,
}
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
}]
fake_json = {
'images': [image],
'annotations': [annotation_1, annotation_2],
'categories': categories
}
mmcv.dump(fake_json, json_name)
def test_coco_annotation_ids_unique():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_ids_error_coco_json(fake_json_file)
# test annotation ids not unique error
with pytest.raises(AssertionError):
CocoDataset(ann_file=fake_json_file, classes=('car', ), pipeline=[])
| 1,293 | 20.932203 | 76 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_datasets/test_dataset_wrapper.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import bisect
import math
from collections import defaultdict
from unittest.mock import MagicMock
import numpy as np
import pytest
from mmdet.datasets import (ClassBalancedDataset, ConcatDataset, CustomDataset,
MultiImageMixDataset, RepeatDataset)
def test_dataset_wrapper():
CustomDataset.load_annotations = MagicMock()
CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx)
dataset_a = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_a = 10
cat_ids_list_a = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_a)
]
ann_info_list_a = []
for _ in range(len_a):
        height = np.random.randint(10, 30)
        width = np.random.randint(10, 30)
        img = np.ones((height, width, 3))
gt_bbox = np.concatenate([
np.random.randint(1, 5, (2, 2)),
np.random.randint(1, 5, (2, 2)) + 5
],
axis=1)
gt_labels = np.random.randint(0, 80, 2)
ann_info_list_a.append(
dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img))
dataset_a.data_infos = MagicMock()
dataset_a.data_infos.__len__.return_value = len_a
dataset_a.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_a[idx])
dataset_a.get_ann_info = MagicMock(
side_effect=lambda idx: ann_info_list_a[idx])
dataset_b = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_b = 20
cat_ids_list_b = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_b)
]
ann_info_list_b = []
for _ in range(len_b):
        height = np.random.randint(10, 30)
        width = np.random.randint(10, 30)
        img = np.ones((height, width, 3))
gt_bbox = np.concatenate([
np.random.randint(1, 5, (2, 2)),
np.random.randint(1, 5, (2, 2)) + 5
],
axis=1)
gt_labels = np.random.randint(0, 80, 2)
ann_info_list_b.append(
dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img))
dataset_b.data_infos = MagicMock()
dataset_b.data_infos.__len__.return_value = len_b
dataset_b.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_b[idx])
dataset_b.get_ann_info = MagicMock(
side_effect=lambda idx: ann_info_list_b[idx])
concat_dataset = ConcatDataset([dataset_a, dataset_b])
assert concat_dataset[5] == 5
assert concat_dataset[25] == 15
assert concat_dataset.get_cat_ids(5) == cat_ids_list_a[5]
assert concat_dataset.get_cat_ids(25) == cat_ids_list_b[15]
assert concat_dataset.get_ann_info(5) == ann_info_list_a[5]
assert concat_dataset.get_ann_info(25) == ann_info_list_b[15]
assert len(concat_dataset) == len(dataset_a) + len(dataset_b)
repeat_dataset = RepeatDataset(dataset_a, 10)
assert repeat_dataset[5] == 5
assert repeat_dataset[15] == 5
assert repeat_dataset[27] == 7
assert repeat_dataset.get_cat_ids(5) == cat_ids_list_a[5]
assert repeat_dataset.get_cat_ids(15) == cat_ids_list_a[5]
assert repeat_dataset.get_cat_ids(27) == cat_ids_list_a[7]
assert repeat_dataset.get_ann_info(5) == ann_info_list_a[5]
assert repeat_dataset.get_ann_info(15) == ann_info_list_a[5]
assert repeat_dataset.get_ann_info(27) == ann_info_list_a[7]
assert len(repeat_dataset) == 10 * len(dataset_a)
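    # reproduce ClassBalancedDataset's repeat-factor computation: each image
    # is repeated max over its categories of max(1, sqrt(repeat_thr / freq)),
    # where freq is the fraction of images containing that category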
category_freq = defaultdict(int)
for cat_ids in cat_ids_list_a:
cat_ids = set(cat_ids)
for cat_id in cat_ids:
category_freq[cat_id] += 1
for k, v in category_freq.items():
category_freq[k] = v / len(cat_ids_list_a)
mean_freq = np.mean(list(category_freq.values()))
repeat_thr = mean_freq
category_repeat = {
cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
repeat_factors = []
for cat_ids in cat_ids_list_a:
cat_ids = set(cat_ids)
repeat_factor = max({category_repeat[cat_id] for cat_id in cat_ids})
repeat_factors.append(math.ceil(repeat_factor))
repeat_factors_cumsum = np.cumsum(repeat_factors)
repeat_factor_dataset = ClassBalancedDataset(dataset_a, repeat_thr)
assert len(repeat_factor_dataset) == repeat_factors_cumsum[-1]
for idx in np.random.randint(0, len(repeat_factor_dataset), 3):
assert repeat_factor_dataset[idx] == bisect.bisect_right(
repeat_factors_cumsum, idx)
assert repeat_factor_dataset.get_ann_info(idx) == ann_info_list_a[
bisect.bisect_right(repeat_factors_cumsum, idx)]
img_scale = (60, 60)
pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(type='Pad', pad_to_square=True, pad_val=114.0),
]
CustomDataset.load_annotations = MagicMock()
results = []
for _ in range(2):
        height = np.random.randint(10, 30)
        width = np.random.randint(10, 30)
        img = np.ones((height, width, 3))
gt_bbox = np.concatenate([
np.random.randint(1, 5, (2, 2)),
np.random.randint(1, 5, (2, 2)) + 5
],
axis=1)
gt_labels = np.random.randint(0, 80, 2)
results.append(dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img))
CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: results[idx])
dataset_a = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_a = 2
cat_ids_list_a = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_a)
]
dataset_a.data_infos = MagicMock()
dataset_a.data_infos.__len__.return_value = len_a
dataset_a.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_a[idx])
    # test that the deprecated dynamic_scale argument raises an error
with pytest.raises(RuntimeError):
MultiImageMixDataset(dataset_a, pipeline, (80, 80))
multi_image_mix_dataset = MultiImageMixDataset(dataset_a, pipeline)
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
assert results_['img'].shape == (img_scale[0], img_scale[1], 3)
# test skip_type_keys
multi_image_mix_dataset = MultiImageMixDataset(
dataset_a,
pipeline,
skip_type_keys=('MixUp', 'RandomFlip', 'Resize', 'Pad'))
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
assert results_['img'].shape == (img_scale[0], img_scale[1], 3)
| 7,107 | 38.054945 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_datasets/test_panoptic_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmdet.datasets.api_wrappers import pq_compute_single_core
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET, CocoPanopticDataset
try:
from panopticapi.utils import id2rgb
except ImportError:
id2rgb = None
def _create_panoptic_style_json(json_name):
image1 = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name1.jpg',
}
image2 = {
'id': 1,
'width': 640,
'height': 800,
'file_name': 'fake_name2.jpg',
}
images = [image1, image2]
annotations = [
{
'segments_info': [{
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0
}, {
'id': 2,
'category_id': 1,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0
}, {
'id': 3,
'category_id': 2,
'iscrowd': 0,
'bbox': [1, 189, 612, 285],
'area': 70036
}],
'file_name':
'fake_name1.jpg',
'image_id':
0
},
{
'segments_info': [
{
                # Different from instance-style json, there
                # are duplicate ids in panoptic-style json
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0
},
{
'id': 4,
'category_id': 1,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 1
},
{
'id': 5,
'category_id': 2,
'iscrowd': 0,
'bbox': [100, 200, 200, 300],
'area': 66666
},
{
'id': 6,
'category_id': 0,
'iscrowd': 0,
'bbox': [1, 189, -10, 285],
'area': 70036
}
],
'file_name':
'fake_name2.jpg',
'image_id':
1
}
]
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
'isthing': 1
}, {
'id': 1,
'name': 'person',
'supercategory': 'person',
'isthing': 1
}, {
'id': 2,
'name': 'wall',
'supercategory': 'wall',
'isthing': 0
}]
fake_json = {
'images': images,
'annotations': annotations,
'categories': categories
}
mmcv.dump(fake_json, json_name)
return fake_json
def test_load_panoptic_style_json():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
fake_json = _create_panoptic_style_json(fake_json_file)
dataset = CocoPanopticDataset(
ann_file=fake_json_file,
classes=[cat['name'] for cat in fake_json['categories']],
pipeline=[])
ann = dataset.get_ann_info(0)
# two legal instances
assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 2
# three masks for both foreground and background
assert len(ann['masks']) == 3
ann = dataset.get_ann_info(1)
# one legal instance, one illegal instance,
# one crowd instance and one background mask
assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 1
assert ann['bboxes_ignore'].shape[0] == 1
assert len(ann['masks']) == 3
def _create_panoptic_gt_annotations(ann_file):
categories = [{
'id': 0,
'name': 'person',
'supercategory': 'person',
'isthing': 1
}, {
'id': 1,
'name': 'dog',
'supercategory': 'dog',
'isthing': 1
}, {
'id': 2,
'name': 'wall',
'supercategory': 'wall',
'isthing': 0
}]
images = [{
'id': 0,
'width': 80,
'height': 60,
'file_name': 'fake_name1.jpg',
}]
annotations = [{
'segments_info': [{
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [10, 10, 10, 40],
'iscrowd': 0
}, {
'id': 2,
'category_id': 0,
'area': 400,
'bbox': [30, 10, 10, 40],
'iscrowd': 0
}, {
'id': 3,
'category_id': 1,
'iscrowd': 0,
'bbox': [50, 10, 10, 5],
'area': 50
}, {
'id': 4,
'category_id': 2,
'iscrowd': 0,
'bbox': [0, 0, 80, 60],
'area': 3950
}],
'file_name':
'fake_name1.png',
'image_id':
0
}]
gt_json = {
'images': images,
'annotations': annotations,
'categories': categories
}
# 4 is the id of the background class annotation.
gt = np.zeros((60, 80), dtype=np.int64) + 4
gt_bboxes = np.array([[10, 10, 10, 40], [30, 10, 10, 40], [50, 10, 10, 5]],
dtype=np.int64)
for i in range(3):
x, y, w, h = gt_bboxes[i]
gt[y:y + h, x:x + w] = i + 1 # id starts from 1
gt = id2rgb(gt).astype(np.uint8)
img_path = osp.join(osp.dirname(ann_file), 'fake_name1.png')
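    # id2rgb returns RGB while mmcv.imwrite expects BGR, hence the
    # channel flip below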
mmcv.imwrite(gt[:, :, ::-1], img_path)
mmcv.dump(gt_json, ann_file)
return gt_json
def test_panoptic_evaluation():
if id2rgb is None:
return
# TP for background class, IoU=3576/4324=0.827
    # 2 is the category id of the background class
pred = np.zeros((60, 80), dtype=np.int64) + 2
pred_bboxes = np.array(
[
[11, 11, 10, 40], # TP IoU=351/449=0.78
[38, 10, 10, 40], # FP
[51, 10, 10, 5]
], # TP IoU=45/55=0.818
dtype=np.int64)
pred_labels = np.array([0, 0, 1], dtype=np.int64)
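    # panoptic encoding: pixel value = instance_id * INSTANCE_OFFSET + label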
for i in range(3):
x, y, w, h = pred_bboxes[i]
pred[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + pred_labels[i]
tmp_dir = tempfile.TemporaryDirectory()
ann_file = osp.join(tmp_dir.name, 'panoptic.json')
gt_json = _create_panoptic_gt_annotations(ann_file)
results = [{'pan_results': pred}]
dataset = CocoPanopticDataset(
ann_file=ann_file,
seg_prefix=tmp_dir.name,
classes=[cat['name'] for cat in gt_json['categories']],
pipeline=[])
    # For 'person', sq = 0.78 / 1, rq = 1 tp / (1 tp + 0.5 * (1 fn + 1 fp)) = 1 / 2
# For 'dog', sq = 0.818, rq = 1 / 1
# For 'wall', sq = 0.827, rq = 1 / 1
# Here is the results for all classes:
# +--------+--------+--------+---------+------------+
# | | PQ | SQ | RQ | categories |
# +--------+--------+--------+---------+------------+
# | All | 67.869 | 80.898 | 83.333 | 3 |
# | Things | 60.453 | 79.996 | 75.000 | 2 |
# | Stuff | 82.701 | 82.701 | 100.000 | 1 |
# +--------+--------+--------+---------+------------+
parsed_results = dataset.evaluate(results)
assert np.isclose(parsed_results['PQ'], 67.869)
assert np.isclose(parsed_results['SQ'], 80.898)
assert np.isclose(parsed_results['RQ'], 83.333)
assert np.isclose(parsed_results['PQ_th'], 60.453)
assert np.isclose(parsed_results['SQ_th'], 79.996)
assert np.isclose(parsed_results['RQ_th'], 75.000)
assert np.isclose(parsed_results['PQ_st'], 82.701)
assert np.isclose(parsed_results['SQ_st'], 82.701)
assert np.isclose(parsed_results['RQ_st'], 100.000)
# test jsonfile_prefix
outfile_prefix = osp.join(tmp_dir.name, 'results')
parsed_results = dataset.evaluate(results, jsonfile_prefix=outfile_prefix)
assert np.isclose(parsed_results['PQ'], 67.869)
assert np.isclose(parsed_results['SQ'], 80.898)
assert np.isclose(parsed_results['RQ'], 83.333)
assert np.isclose(parsed_results['PQ_th'], 60.453)
assert np.isclose(parsed_results['SQ_th'], 79.996)
assert np.isclose(parsed_results['RQ_th'], 75.000)
assert np.isclose(parsed_results['PQ_st'], 82.701)
assert np.isclose(parsed_results['SQ_st'], 82.701)
assert np.isclose(parsed_results['RQ_st'], 100.000)
# test classwise
parsed_results = dataset.evaluate(results, classwise=True)
assert np.isclose(parsed_results['PQ'], 67.869)
assert np.isclose(parsed_results['SQ'], 80.898)
assert np.isclose(parsed_results['RQ'], 83.333)
assert np.isclose(parsed_results['PQ_th'], 60.453)
assert np.isclose(parsed_results['SQ_th'], 79.996)
assert np.isclose(parsed_results['RQ_th'], 75.000)
assert np.isclose(parsed_results['PQ_st'], 82.701)
assert np.isclose(parsed_results['SQ_st'], 82.701)
assert np.isclose(parsed_results['RQ_st'], 100.000)
# test the api wrapper of `pq_compute_single_core`
# Codes are copied from `coco_panoptic.py` and modified
result_files, _ = dataset.format_results(
results, jsonfile_prefix=outfile_prefix)
imgs = dataset.coco.imgs
gt_json = dataset.coco.img_ann_map # image to annotations
gt_json = [{
'image_id': k,
'segments_info': v,
'file_name': imgs[k]['segm_file']
} for k, v in gt_json.items()]
pred_json = mmcv.load(result_files['panoptic'])
pred_json = dict((el['image_id'], el) for el in pred_json['annotations'])
# match the gt_anns and pred_anns in the same image
matched_annotations_list = []
for gt_ann in gt_json:
img_id = gt_ann['image_id']
matched_annotations_list.append((gt_ann, pred_json[img_id]))
gt_folder = dataset.seg_prefix
pred_folder = osp.join(osp.dirname(outfile_prefix), 'panoptic')
pq_stat = pq_compute_single_core(0, matched_annotations_list, gt_folder,
pred_folder, dataset.categories)
pq_all = pq_stat.pq_average(dataset.categories, isthing=None)[0]
assert np.isclose(pq_all['pq'] * 100, 67.869)
assert np.isclose(pq_all['sq'] * 100, 80.898)
assert np.isclose(pq_all['rq'] * 100, 83.333)
assert pq_all['n'] == 3
| 10,453 | 29.747059 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_datasets/test_custom_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import unittest
from unittest.mock import MagicMock, patch
import pytest
from mmdet.datasets import DATASETS
@patch('mmdet.datasets.CocoDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CustomDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.XMLDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CocoDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CustomDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.XMLDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CityscapesDataset._filter_imgs', MagicMock)
@pytest.mark.parametrize('dataset',
['CocoDataset', 'VOCDataset', 'CityscapesDataset'])
def test_custom_classes_override_default(dataset):
dataset_class = DATASETS.get(dataset)
if dataset in ['CocoDataset', 'CityscapesDataset']:
dataset_class.coco = MagicMock()
dataset_class.cat_ids = MagicMock()
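        # mock the COCO API attributes that are accessed when custom
        # classes override the defaults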
original_classes = dataset_class.CLASSES
# Test setting classes as a tuple
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=('bus', 'car'),
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ('bus', 'car')
print(custom_dataset)
# Test setting classes as a list
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['bus', 'car'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
# Test overriding not a subset
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['foo'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['foo']
print(custom_dataset)
# Test default behavior
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=None,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES == original_classes
print(custom_dataset)
# Test sending file path
import tempfile
tmp_file = tempfile.NamedTemporaryFile()
with open(tmp_file.name, 'w') as f:
f.write('bus\ncar\n')
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=tmp_file.name,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
tmp_file.close()
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
class CustomDatasetTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.data_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'data')
self.dataset_class = DATASETS.get('XMLDataset')
def test_data_infos__default_db_directories(self):
"""Test correct data read having a Pacal-VOC directory structure."""
test_dataset_root = os.path.join(self.data_dir, 'VOCdevkit', 'VOC2007')
custom_ds = self.dataset_class(
data_root=test_dataset_root,
ann_file=os.path.join(test_dataset_root, 'ImageSets', 'Main',
'trainval.txt'),
pipeline=[],
classes=('person', 'dog'),
test_mode=True)
self.assertListEqual([{
'id': '000001',
'filename': 'JPEGImages/000001.jpg',
'width': 353,
'height': 500
}], custom_ds.data_infos)
def test_data_infos__overridden_db_subdirectories(self):
"""Test correct data read having a customized directory structure."""
test_dataset_root = os.path.join(self.data_dir, 'custom_dataset')
custom_ds = self.dataset_class(
data_root=test_dataset_root,
ann_file=os.path.join(test_dataset_root, 'trainval.txt'),
pipeline=[],
classes=('person', 'dog'),
test_mode=True,
img_prefix='',
img_subdir='images',
ann_subdir='images')
self.assertListEqual([{
'id': '000001',
'filename': 'images/000001.jpg',
'width': 353,
'height': 500
}], custom_ds.data_infos)
| 4,775 | 33.114286 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_formatting.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from mmcv.utils import build_from_cfg
from mmdet.datasets.builder import PIPELINES
def test_default_format_bundle():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../data'),
img_info=dict(filename='color.jpg'))
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
bundle = dict(type='DefaultFormatBundle')
bundle = build_from_cfg(bundle, PIPELINES)
results = load(results)
assert 'pad_shape' not in results
assert 'scale_factor' not in results
assert 'img_norm_cfg' not in results
results = bundle(results)
assert 'pad_shape' in results
assert 'scale_factor' in results
assert 'img_norm_cfg' in results
| 786 | 30.48 | 65 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.bbox.assigners import MaxIoUAssigner
from mmdet.core.bbox.samplers import (OHEMSampler, RandomSampler,
ScoreHLRSampler)
def test_random_sampler():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([1, 2])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
sampler = RandomSampler(
num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sampler_empty_gt():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.empty(0, ).long()
assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
sampler = RandomSampler(
num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sampler_empty_pred():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.empty(0, 4)
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([1, 2])
assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
sampler = RandomSampler(
num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
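# OHEM needs a detector context: the sampler forwards candidates through the
# roi_head's bbox head and keeps the hardest (highest-loss) examples.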
def _context_for_ohem():
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(__file__))))
from test_models.test_forward import _get_detector_cfg
model = _get_detector_cfg(
'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
model['pretrained'] = None
from mmdet.models import build_detector
context = build_detector(model).roi_head
return context
def test_ohem_sampler():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([1, 2])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
context = _context_for_ohem()
sampler = OHEMSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_ohem_sampler_empty_gt():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.LongTensor([])
gt_bboxes_ignore = torch.Tensor([])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
context = _context_for_ohem()
sampler = OHEMSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_ohem_sampler_empty_pred():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.empty(0, 4)
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_labels = torch.LongTensor([1, 2, 2, 3])
gt_bboxes_ignore = torch.Tensor([])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
context = _context_for_ohem()
sampler = OHEMSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sample_result():
from mmdet.core.bbox.samplers.sampling_result import SamplingResult
SamplingResult.random(num_gts=0, num_preds=0)
SamplingResult.random(num_gts=0, num_preds=3)
SamplingResult.random(num_gts=3, num_preds=3)
SamplingResult.random(num_gts=0, num_preds=3)
SamplingResult.random(num_gts=7, num_preds=7)
SamplingResult.random(num_gts=7, num_preds=64)
SamplingResult.random(num_gts=24, num_preds=3)
for i in range(3):
SamplingResult.random(rng=i)
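# Unlike the other samplers, ScoreHLRSampler.sample returns a tuple of
# (sampling result, negative label weights), hence the `sample_result, _`
# unpacking below.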
def test_score_hlr_sampler_empty_pred():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
context = _context_for_ohem()
sampler = ScoreHLRSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
gt_bboxes_ignore = torch.Tensor([])
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
# empty bbox
bboxes = torch.empty(0, 4)
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_labels = torch.LongTensor([1, 2, 2, 3])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
sample_result, _ = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.neg_inds) == 0
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
# empty gt
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.LongTensor([])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
sample_result, _ = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_inds) == 0
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
# non-empty input
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_labels = torch.LongTensor([1, 2, 2, 3])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
sample_result, _ = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
| 9,735 | 28.50303 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_loading.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
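# All tests below rely on the small fixture image tests/data/color.jpg,
# which loads with shape (288, 512, 3) in BGR order.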
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
| 3,773 | 40.021739 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_transform/test_transform.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
import torch
from mmcv.utils import build_from_cfg
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmdet.datasets.builder import PIPELINES
from .utils import create_random_bboxes
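# Each test below builds its transform from a config dict via
# build_from_cfg(..., PIPELINES), first exercising the constructor's
# assertion guards and then the numerical behavior of the transform.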
def test_resize():
    # test assertion if img_scale is a flat list of ints instead of a tuple
    # or a list of tuples
with pytest.raises(AssertionError):
transform = dict(type='Resize', img_scale=[1333, 800], keep_ratio=True)
build_from_cfg(transform, PIPELINES)
    # test assertion if len(img_scale) > 1 while ratio_range is not None
with pytest.raises(AssertionError):
transform = dict(
type='Resize',
img_scale=[(1333, 800), (1333, 600)],
ratio_range=(0.9, 1.1),
keep_ratio=True)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid multiscale_mode
with pytest.raises(AssertionError):
transform = dict(
type='Resize',
img_scale=[(1333, 800), (1333, 600)],
keep_ratio=True,
multiscale_mode='2333')
build_from_cfg(transform, PIPELINES)
# test assertion if both scale and scale_factor are set
with pytest.raises(AssertionError):
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True)
transform = build_from_cfg(transform, PIPELINES)
results = load(results)
results['scale'] = (1333, 800)
results['scale_factor'] = 1.0
results = transform(results)
transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True)
resize_module = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['img2'] = copy.deepcopy(img)
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['img_fields'] = ['img', 'img2']
results = resize_module(results)
assert np.equal(results['img'], results['img2']).all()
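    # drop the cached scale/scale_factor so the next Resize call samples a
    # fresh scale instead of reusing the previous one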
results.pop('scale')
results.pop('scale_factor')
transform = dict(
type='Resize',
img_scale=(1280, 800),
multiscale_mode='value',
keep_ratio=False)
resize_module = build_from_cfg(transform, PIPELINES)
results = resize_module(results)
assert np.equal(results['img'], results['img2']).all()
assert results['img_shape'] == (800, 1280, 3)
    assert results['img'].dtype == results['img2'].dtype == np.uint8
results_seg = {
'img': img,
'img_shape': img.shape,
'ori_shape': img.shape,
'gt_semantic_seg': copy.deepcopy(img),
'gt_seg': copy.deepcopy(img),
'seg_fields': ['gt_semantic_seg', 'gt_seg']
}
transform = dict(
type='Resize',
img_scale=(640, 400),
multiscale_mode='value',
keep_ratio=False)
resize_module = build_from_cfg(transform, PIPELINES)
results_seg = resize_module(results_seg)
assert results_seg['gt_semantic_seg'].shape == results_seg['gt_seg'].shape
assert results_seg['img_shape'] == (400, 640, 3)
assert results_seg['img_shape'] != results_seg['ori_shape']
assert results_seg['gt_semantic_seg'].shape == results_seg['img_shape']
assert np.equal(results_seg['gt_semantic_seg'],
results_seg['gt_seg']).all()
def test_flip():
# test assertion for invalid flip_ratio
with pytest.raises(AssertionError):
transform = dict(type='RandomFlip', flip_ratio=1.5)
build_from_cfg(transform, PIPELINES)
    # test assertion when sum(flip_ratio) is not within [0, 1]
with pytest.raises(AssertionError):
transform = dict(
type='RandomFlip',
flip_ratio=[0.7, 0.8],
direction=['horizontal', 'vertical'])
build_from_cfg(transform, PIPELINES)
# test assertion for mismatch between number of flip_ratio and direction
with pytest.raises(AssertionError):
transform = dict(type='RandomFlip', flip_ratio=[0.4, 0.5])
build_from_cfg(transform, PIPELINES)
# test assertion for invalid direction
with pytest.raises(AssertionError):
transform = dict(
type='RandomFlip', flip_ratio=1., direction='horizonta')
build_from_cfg(transform, PIPELINES)
transform = dict(type='RandomFlip', flip_ratio=1.)
flip_module = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img2'] = copy.deepcopy(img)
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results['img_fields'] = ['img', 'img2']
results = flip_module(results)
assert np.equal(results['img'], results['img2']).all()
flip_module = build_from_cfg(transform, PIPELINES)
results = flip_module(results)
assert np.equal(results['img'], results['img2']).all()
assert np.equal(original_img, results['img']).all()
# test flip_ratio is float, direction is list
transform = dict(
type='RandomFlip',
flip_ratio=0.9,
direction=['horizontal', 'vertical', 'diagonal'])
flip_module = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results['img_fields'] = ['img']
results = flip_module(results)
if results['flip']:
assert np.array_equal(
mmcv.imflip(original_img, results['flip_direction']),
results['img'])
else:
assert np.array_equal(original_img, results['img'])
# test flip_ratio is list, direction is list
transform = dict(
type='RandomFlip',
flip_ratio=[0.3, 0.3, 0.2],
direction=['horizontal', 'vertical', 'diagonal'])
flip_module = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results['img_fields'] = ['img']
results = flip_module(results)
if results['flip']:
assert np.array_equal(
mmcv.imflip(original_img, results['flip_direction']),
results['img'])
else:
assert np.array_equal(original_img, results['img'])
def test_random_crop():
# test assertion for invalid random crop
with pytest.raises(AssertionError):
transform = dict(type='RandomCrop', crop_size=(-1, 0))
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# TODO: add img_fields test
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='RandomCrop', crop_size=(h - 20, w - 20))
crop_module = build_from_cfg(transform, PIPELINES)
results = crop_module(results)
assert results['img'].shape[:2] == (h - 20, w - 20)
    # All bboxes should be preserved after crop
assert results['img_shape'][:2] == (h - 20, w - 20)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes'].shape[0] == 8
assert results['gt_bboxes_ignore'].shape[0] == 2
def area(bboxes):
return np.prod(bboxes[:, 2:4] - bboxes[:, 0:2], axis=1)
assert (area(results['gt_bboxes']) <= area(gt_bboxes)).all()
assert (area(results['gt_bboxes_ignore']) <= area(gt_bboxes_ignore)).all()
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
# test assertion for invalid crop_type
with pytest.raises(ValueError):
transform = dict(
type='RandomCrop', crop_size=(1, 1), crop_type='unknown')
build_from_cfg(transform, PIPELINES)
# test assertion for invalid crop_size
with pytest.raises(AssertionError):
transform = dict(
type='RandomCrop', crop_type='relative', crop_size=(0, 0))
build_from_cfg(transform, PIPELINES)
def _construct_toy_data():
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
# image
results['img'] = img
results['img_shape'] = img.shape
results['img_fields'] = ['img']
# bboxes
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
results['gt_bboxes'] = np.array([[0., 0., 2., 1.]], dtype=np.float32)
results['gt_bboxes_ignore'] = np.array([[2., 0., 3., 1.]],
dtype=np.float32)
# labels
results['gt_labels'] = np.array([1], dtype=np.int64)
return results
# test crop_type "relative_range"
results = _construct_toy_data()
transform = dict(
type='RandomCrop',
crop_type='relative_range',
crop_size=(0.3, 0.7),
allow_negative_crop=True)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
h, w = results_transformed['img_shape'][:2]
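    # the toy image is 2x4; 'relative_range' samples the crop ratio in
    # [0.3, 1] for h and [0.7, 1] for w, rounded to the nearest integer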
assert int(2 * 0.3 + 0.5) <= h <= int(2 * 1 + 0.5)
assert int(4 * 0.7 + 0.5) <= w <= int(4 * 1 + 0.5)
assert results_transformed['gt_bboxes'].dtype == np.float32
assert results_transformed['gt_bboxes_ignore'].dtype == np.float32
# test crop_type "relative"
transform = dict(
type='RandomCrop',
crop_type='relative',
crop_size=(0.3, 0.7),
allow_negative_crop=True)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
h, w = results_transformed['img_shape'][:2]
assert h == int(2 * 0.3 + 0.5) and w == int(4 * 0.7 + 0.5)
assert results_transformed['gt_bboxes'].dtype == np.float32
assert results_transformed['gt_bboxes_ignore'].dtype == np.float32
# test crop_type "absolute"
transform = dict(
type='RandomCrop',
crop_type='absolute',
crop_size=(1, 2),
allow_negative_crop=True)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
h, w = results_transformed['img_shape'][:2]
assert h == 1 and w == 2
assert results_transformed['gt_bboxes'].dtype == np.float32
assert results_transformed['gt_bboxes_ignore'].dtype == np.float32
# test crop_type "absolute_range"
transform = dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(1, 20),
allow_negative_crop=True)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
h, w = results_transformed['img_shape'][:2]
assert 1 <= h <= 2 and 1 <= w <= 4
assert results_transformed['gt_bboxes'].dtype == np.float32
assert results_transformed['gt_bboxes_ignore'].dtype == np.float32
def test_min_iou_random_crop():
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(1, w, h)
gt_bboxes_ignore = create_random_bboxes(1, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='MinIoURandomCrop')
crop_module = build_from_cfg(transform, PIPELINES)
# Test for img_fields
results_test = copy.deepcopy(results)
results_test['img1'] = results_test['img']
results_test['img_fields'] = ['img', 'img1']
with pytest.raises(AssertionError):
crop_module(results_test)
results = crop_module(results)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
patch = np.array([0, 0, results['img_shape'][1], results['img_shape'][0]])
ious = bbox_overlaps(patch.reshape(-1, 4),
results['gt_bboxes']).reshape(-1)
ious_ignore = bbox_overlaps(
patch.reshape(-1, 4), results['gt_bboxes_ignore']).reshape(-1)
mode = crop_module.mode
if mode == 1:
assert np.equal(results['gt_bboxes'], gt_bboxes).all()
assert np.equal(results['gt_bboxes_ignore'], gt_bboxes_ignore).all()
else:
assert (ious >= mode).all()
assert (ious_ignore >= mode).all()
def test_pad():
# test assertion if both size_divisor and size is None
with pytest.raises(AssertionError):
transform = dict(type='Pad')
build_from_cfg(transform, PIPELINES)
transform = dict(type='Pad', size_divisor=32)
transform = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img2'] = copy.deepcopy(img)
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results['img_fields'] = ['img', 'img2']
results = transform(results)
assert np.equal(results['img'], results['img2']).all()
    # original image size is already divisible by 32, so padding leaves it
    # intact
assert np.equal(results['img'], original_img).all()
img_shape = results['img'].shape
assert img_shape[0] % 32 == 0
assert img_shape[1] % 32 == 0
resize_transform = dict(
type='Resize', img_scale=(1333, 800), keep_ratio=True)
resize_module = build_from_cfg(resize_transform, PIPELINES)
results = resize_module(results)
results = transform(results)
img_shape = results['img'].shape
assert np.equal(results['img'], results['img2']).all()
assert img_shape[0] % 32 == 0
assert img_shape[1] % 32 == 0
    # test that size and size_divisor must be None when pad_to_square is True
with pytest.raises(AssertionError):
transform = dict(type='Pad', size_divisor=32, pad_to_square=True)
build_from_cfg(transform, PIPELINES)
transform = dict(type='Pad', pad_to_square=True)
transform = build_from_cfg(transform, PIPELINES)
results['img'] = img
results = transform(results)
assert results['img'].shape[0] == results['img'].shape[1]
    # test that a deprecated scalar pad_val is converted to a dict
transform = dict(type='Pad', size_divisor=32, pad_val=0)
with pytest.deprecated_call():
transform = build_from_cfg(transform, PIPELINES)
assert isinstance(transform.pad_val, dict)
results = transform(results)
img_shape = results['img'].shape
assert img_shape[0] % 32 == 0
assert img_shape[1] % 32 == 0
def test_normalize():
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
transform = dict(type='Normalize', **img_norm_cfg)
transform = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img2'] = copy.deepcopy(img)
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results['img_fields'] = ['img', 'img2']
results = transform(results)
assert np.equal(results['img'], results['img2']).all()
mean = np.array(img_norm_cfg['mean'])
std = np.array(img_norm_cfg['std'])
converted_img = (original_img[..., ::-1] - mean) / std
assert np.allclose(results['img'], converted_img)
def test_albu_transform():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
# Define simple pipeline
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
albu_transform = dict(
type='Albu', transforms=[dict(type='ChannelShuffle', p=1)])
albu_transform = build_from_cfg(albu_transform, PIPELINES)
    normalize = dict(type='Normalize', mean=[0] * 3, std=[1] * 3, to_rgb=True)
normalize = build_from_cfg(normalize, PIPELINES)
# Execute transforms
results = load(results)
results = albu_transform(results)
results = normalize(results)
assert results['img'].dtype == np.float32
def test_random_center_crop_pad():
# test assertion for invalid crop_size while test_mode=False
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=(-1, 0),
test_mode=False,
test_pad_mode=None)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid ratios while test_mode=False
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(1.0),
test_mode=False,
test_pad_mode=None)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid mean, std and to_rgb
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
mean=None,
std=None,
to_rgb=None,
test_mode=False,
test_pad_mode=None)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid crop_size while test_mode=True
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=None,
border=None,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=True,
test_pad_mode=('logical_or', 127))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid ratios while test_mode=True
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=(0.9, 1.0, 1.1),
border=None,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=True,
test_pad_mode=('logical_or', 127))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid border while test_mode=True
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=128,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=True,
test_pad_mode=('logical_or', 127))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid test_pad_mode while test_mode=True
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=True,
test_pad_mode=('do_nothing', 100))
build_from_cfg(transform, PIPELINES)
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
load = dict(type='LoadImageFromFile', to_float32=True)
load = build_from_cfg(load, PIPELINES)
results = load(results)
test_results = copy.deepcopy(results)
h, w, _ = results['img_shape']
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
train_transform = dict(
type='RandomCenterCropPad',
crop_size=(h - 20, w - 20),
ratios=(1.0, ),
border=128,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=False,
test_pad_mode=None)
crop_module = build_from_cfg(train_transform, PIPELINES)
train_results = crop_module(results)
assert train_results['img'].shape[:2] == (h - 20, w - 20)
    # All bboxes should be preserved after crop
assert train_results['pad_shape'][:2] == (h - 20, w - 20)
assert train_results['gt_bboxes'].shape[0] == 8
assert train_results['gt_bboxes_ignore'].shape[0] == 2
assert train_results['gt_bboxes'].dtype == np.float32
assert train_results['gt_bboxes_ignore'].dtype == np.float32
test_transform = dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=True,
test_pad_mode=('logical_or', 127))
crop_module = build_from_cfg(test_transform, PIPELINES)
test_results = crop_module(test_results)
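    # with test_pad_mode ('logical_or', 127) each spatial dimension is
    # padded up to (size | 127)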
assert test_results['img'].shape[:2] == (h | 127, w | 127)
assert test_results['pad_shape'][:2] == (h | 127, w | 127)
assert 'border' in test_results
def test_multi_scale_flip_aug():
    # test assertion when both scale_factor and img_scale are given
with pytest.raises(AssertionError):
transform = dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
img_scale=[(1333, 800)],
transforms=[dict(type='Resize')])
build_from_cfg(transform, PIPELINES)
# test assertion if both scale_factor and img_scale are None
with pytest.raises(AssertionError):
transform = dict(
type='MultiScaleFlipAug',
scale_factor=None,
img_scale=None,
transforms=[dict(type='Resize')])
build_from_cfg(transform, PIPELINES)
# test assertion if img_scale is not tuple or list of tuple
with pytest.raises(AssertionError):
transform = dict(
type='MultiScaleFlipAug',
img_scale=[1333, 800],
transforms=[dict(type='Resize')])
build_from_cfg(transform, PIPELINES)
# test assertion if flip_direction is not str or list of str
with pytest.raises(AssertionError):
transform = dict(
type='MultiScaleFlipAug',
img_scale=[(1333, 800)],
flip_direction=1,
transforms=[dict(type='Resize')])
build_from_cfg(transform, PIPELINES)
scale_transform = dict(
type='MultiScaleFlipAug',
img_scale=[(1333, 800), (1333, 640)],
transforms=[dict(type='Resize', keep_ratio=True)])
transform = build_from_cfg(scale_transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['img_fields'] = ['img']
scale_results = transform(copy.deepcopy(results))
assert len(scale_results['img']) == 2
assert scale_results['img'][0].shape == (750, 1333, 3)
assert scale_results['img_shape'][0] == (750, 1333, 3)
assert scale_results['img'][1].shape == (640, 1138, 3)
assert scale_results['img_shape'][1] == (640, 1138, 3)
scale_factor_transform = dict(
type='MultiScaleFlipAug',
scale_factor=[0.8, 1.0, 1.2],
transforms=[dict(type='Resize', keep_ratio=False)])
transform = build_from_cfg(scale_factor_transform, PIPELINES)
scale_factor_results = transform(copy.deepcopy(results))
assert len(scale_factor_results['img']) == 3
assert scale_factor_results['img'][0].shape == (230, 409, 3)
assert scale_factor_results['img_shape'][0] == (230, 409, 3)
assert scale_factor_results['img'][1].shape == (288, 512, 3)
assert scale_factor_results['img_shape'][1] == (288, 512, 3)
assert scale_factor_results['img'][2].shape == (345, 614, 3)
assert scale_factor_results['img_shape'][2] == (345, 614, 3)
# test pipeline of coco_detection
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
load_cfg, multi_scale_cfg = mmcv.Config.fromfile(
'configs/_base_/datasets/coco_detection.py').test_pipeline
load = build_from_cfg(load_cfg, PIPELINES)
transform = build_from_cfg(multi_scale_cfg, PIPELINES)
results = transform(load(results))
assert len(results['img']) == 1
assert len(results['img_metas']) == 1
assert isinstance(results['img'][0], torch.Tensor)
assert isinstance(results['img_metas'][0], mmcv.parallel.DataContainer)
assert results['img_metas'][0].data['ori_shape'] == (288, 512, 3)
assert results['img_metas'][0].data['img_shape'] == (750, 1333, 3)
assert results['img_metas'][0].data['pad_shape'] == (768, 1344, 3)
assert results['img_metas'][0].data['scale_factor'].tolist() == [
2.603515625, 2.6041667461395264, 2.603515625, 2.6041667461395264
]
def test_cutout():
# test n_holes
with pytest.raises(AssertionError):
transform = dict(type='CutOut', n_holes=(5, 3), cutout_shape=(8, 8))
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(type='CutOut', n_holes=(3, 4, 5), cutout_shape=(8, 8))
build_from_cfg(transform, PIPELINES)
# test cutout_shape and cutout_ratio
with pytest.raises(AssertionError):
transform = dict(type='CutOut', n_holes=1, cutout_shape=8)
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(type='CutOut', n_holes=1, cutout_ratio=0.2)
build_from_cfg(transform, PIPELINES)
    # exactly one of cutout_shape and cutout_ratio should be given
with pytest.raises(AssertionError):
transform = dict(type='CutOut', n_holes=1)
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(
type='CutOut',
n_holes=1,
cutout_shape=(2, 2),
cutout_ratio=(0.4, 0.4))
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
results['pad_shape'] = img.shape
results['img_fields'] = ['img']
transform = dict(type='CutOut', n_holes=1, cutout_shape=(10, 10))
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() < img.sum()
transform = dict(type='CutOut', n_holes=1, cutout_ratio=(0.8, 0.8))
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() < img.sum()
transform = dict(
type='CutOut',
n_holes=(2, 4),
cutout_shape=[(10, 10), (15, 15)],
fill_in=(255, 255, 255))
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() > img.sum()
transform = dict(
type='CutOut',
n_holes=1,
cutout_ratio=(0.8, 0.8),
fill_in=(255, 255, 255))
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() > img.sum()
def test_random_shift():
# test assertion for invalid shift_ratio
with pytest.raises(AssertionError):
transform = dict(type='RandomShift', shift_ratio=1.5)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid max_shift_px
with pytest.raises(AssertionError):
transform = dict(type='RandomShift', max_shift_px=-1)
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
# TODO: add img_fields test
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='RandomShift', shift_ratio=1.0)
random_shift_module = build_from_cfg(transform, PIPELINES)
results = random_shift_module(results)
assert results['img'].shape[:2] == (h, w)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
def test_random_affine():
# test assertion for invalid translate_ratio
with pytest.raises(AssertionError):
transform = dict(type='RandomAffine', max_translate_ratio=1.5)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid scaling_ratio_range
with pytest.raises(AssertionError):
transform = dict(type='RandomAffine', scaling_ratio_range=(1.5, 0.5))
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(type='RandomAffine', scaling_ratio_range=(0, 0.5))
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='RandomAffine')
random_affine_module = build_from_cfg(transform, PIPELINES)
results = random_affine_module(results)
assert results['img'].shape[:2] == (h, w)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
# test filter bbox
gt_bboxes = np.array([[0, 0, 1, 1], [0, 0, 3, 100]], dtype=np.float32)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
transform = dict(
type='RandomAffine',
max_rotate_degree=0.,
max_translate_ratio=0.,
scaling_ratio_range=(1., 1.),
max_shear_degree=0.,
border=(0, 0),
min_bbox_size=2,
max_aspect_ratio=20,
skip_filter=False)
random_affine_module = build_from_cfg(transform, PIPELINES)
results = random_affine_module(results)
assert results['gt_bboxes'].shape[0] == 0
assert results['gt_labels'].shape[0] == 0
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
def test_mosaic():
# test assertion for invalid img_scale
with pytest.raises(AssertionError):
transform = dict(type='Mosaic', img_scale=640)
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
# TODO: add img_fields test
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='Mosaic', img_scale=(10, 12))
mosaic_module = build_from_cfg(transform, PIPELINES)
# test assertion for invalid mix_results
with pytest.raises(AssertionError):
mosaic_module(results)
results['mix_results'] = [copy.deepcopy(results)] * 3
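    # Mosaic stitches 4 images (the original plus the 3 in mix_results), so
    # the output canvas is twice img_scale: (20, 24)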
results = mosaic_module(results)
assert results['img'].shape[:2] == (20, 24)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
def test_mixup():
# test assertion for invalid img_scale
with pytest.raises(AssertionError):
transform = dict(type='MixUp', img_scale=640)
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
# TODO: add img_fields test
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='MixUp', img_scale=(10, 12))
mixup_module = build_from_cfg(transform, PIPELINES)
# test assertion for invalid mix_results
with pytest.raises(AssertionError):
mixup_module(results)
with pytest.raises(AssertionError):
results['mix_results'] = [copy.deepcopy(results)] * 2
mixup_module(results)
results['mix_results'] = [copy.deepcopy(results)]
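    # the mixed output keeps the original image's spatial shape (288, 512)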
results = mixup_module(results)
assert results['img'].shape[:2] == (288, 512)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
def test_photo_metric_distortion():
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
transform = dict(type='PhotoMetricDistortion')
distortion_module = build_from_cfg(transform, PIPELINES)
# test assertion for invalid img_fields
with pytest.raises(AssertionError):
results = dict()
results['img'] = img
results['img2'] = img
results['img_fields'] = ['img', 'img2']
distortion_module(results)
# test uint8 input
results = dict()
results['img'] = img
results = distortion_module(results)
assert results['img'].dtype == np.float32
# test float32 input
results = dict()
results['img'] = img.astype(np.float32)
results = distortion_module(results)
assert results['img'].dtype == np.float32
| 37,401 | 36.552209 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_transform/test_rotate.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pytest
from mmcv.utils import build_from_cfg
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import PIPELINES
from .utils import check_result_same, construct_toy_data
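# The rotate tests run on the 2x4 toy sample from construct_toy_data and
# compare the full result dict (image, bboxes, masks, semantic seg) against
# hand-computed expectations via check_result_same.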
def test_rotate():
# test assertion for invalid type of max_rotate_angle
with pytest.raises(AssertionError):
transform = dict(type='Rotate', level=1, max_rotate_angle=(30, ))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid type of scale
with pytest.raises(AssertionError):
transform = dict(type='Rotate', level=2, scale=(1.2, ))
build_from_cfg(transform, PIPELINES)
# test ValueError for invalid type of img_fill_val
with pytest.raises(ValueError):
transform = dict(
type='Rotate', level=2, img_fill_val=[
128,
])
build_from_cfg(transform, PIPELINES)
# test assertion for invalid number of elements in center
with pytest.raises(AssertionError):
transform = dict(type='Rotate', level=2, center=(0.5, ))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid type of center
with pytest.raises(AssertionError):
transform = dict(type='Rotate', level=2, center=[0, 0])
build_from_cfg(transform, PIPELINES)
# test case when no rotate aug (level=0)
results = construct_toy_data()
img_fill_val = (104, 116, 124)
seg_ignore_label = 255
transform = dict(
type='Rotate',
level=0,
prob=1.,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label,
)
rotate_module = build_from_cfg(transform, PIPELINES)
results_wo_rotate = rotate_module(copy.deepcopy(results))
check_result_same(results, results_wo_rotate)
# test case when no rotate aug (prob<=0)
transform = dict(
type='Rotate', level=10, prob=0., img_fill_val=img_fill_val, scale=0.6)
rotate_module = build_from_cfg(transform, PIPELINES)
results_wo_rotate = rotate_module(copy.deepcopy(results))
check_result_same(results, results_wo_rotate)
# test clockwise rotation with angle 90
results = construct_toy_data()
img_fill_val = 128
transform = dict(
type='Rotate',
level=10,
max_rotate_angle=90,
img_fill_val=img_fill_val,
# set random_negative_prob to 0 for clockwise rotation
random_negative_prob=0.,
prob=1.)
rotate_module = build_from_cfg(transform, PIPELINES)
results_rotated = rotate_module(copy.deepcopy(results))
img_r = np.array([[img_fill_val, 6, 2, img_fill_val],
[img_fill_val, 7, 3, img_fill_val]]).astype(np.uint8)
img_r = np.stack([img_r, img_r, img_r], axis=-1)
results_gt = copy.deepcopy(results)
results_gt['img'] = img_r
results_gt['gt_bboxes'] = np.array([[1., 0., 2., 1.]], dtype=np.float32)
results_gt['gt_bboxes_ignore'] = np.empty((0, 4), dtype=np.float32)
gt_masks = np.array([[0, 1, 1, 0], [0, 0, 1, 0]],
dtype=np.uint8)[None, :, :]
results_gt['gt_masks'] = BitmapMasks(gt_masks, 2, 4)
results_gt['gt_semantic_seg'] = np.array(
[[255, 6, 2, 255], [255, 7, 3,
255]]).astype(results['gt_semantic_seg'].dtype)
check_result_same(results_gt, results_rotated)
# test clockwise rotation with angle 90, PolygonMasks
results = construct_toy_data(poly2mask=False)
results_rotated = rotate_module(copy.deepcopy(results))
    gt_masks = [[np.array([2, 0, 2, 1, 1, 1, 1, 0], dtype=np.float64)]]
results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4)
check_result_same(results_gt, results_rotated)
# test counter-clockwise rotation with angle 90,
    # and specify the rotation center
img_fill_val = (104, 116, 124)
transform = dict(
type='Rotate',
level=10,
max_rotate_angle=90,
center=(0, 0),
img_fill_val=img_fill_val,
# set random_negative_prob to 1 for counter-clockwise rotation
random_negative_prob=1.,
prob=1.)
results = construct_toy_data()
rotate_module = build_from_cfg(transform, PIPELINES)
results_rotated = rotate_module(copy.deepcopy(results))
results_gt = copy.deepcopy(results)
h, w = results['img'].shape[:2]
img_r = np.stack([
np.ones((h, w)) * img_fill_val[0],
np.ones((h, w)) * img_fill_val[1],
np.ones((h, w)) * img_fill_val[2]
],
axis=-1).astype(np.uint8)
img_r[0, 0, :] = 1
img_r[0, 1, :] = 5
results_gt['img'] = img_r
results_gt['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)
results_gt['gt_bboxes_ignore'] = np.empty((0, 4), dtype=np.float32)
results_gt['gt_labels'] = np.empty((0, ), dtype=np.int64)
gt_masks = np.empty((0, h, w), dtype=np.uint8)
results_gt['gt_masks'] = BitmapMasks(gt_masks, h, w)
gt_seg = (np.ones((h, w)) * 255).astype(results['gt_semantic_seg'].dtype)
gt_seg[0, 0], gt_seg[0, 1] = 1, 5
results_gt['gt_semantic_seg'] = gt_seg
check_result_same(results_gt, results_rotated)
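    # a scalar center should be expanded to (center, center), so the
    # expected results match the tuple case above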
transform = dict(
type='Rotate',
level=10,
max_rotate_angle=90,
center=(0),
img_fill_val=img_fill_val,
random_negative_prob=1.,
prob=1.)
rotate_module = build_from_cfg(transform, PIPELINES)
results_rotated = rotate_module(copy.deepcopy(results))
check_result_same(results_gt, results_rotated)
# test counter-clockwise rotation with angle 90,
    # and specify the rotation center, PolygonMasks
results = construct_toy_data(poly2mask=False)
results_rotated = rotate_module(copy.deepcopy(results))
    gt_masks = [[np.array([0, 0, 0, 0, 1, 0, 1, 0], dtype=np.float64)]]
results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4)
check_result_same(results_gt, results_rotated)
# test AutoAugment equipped with Rotate
policies = [[dict(type='Rotate', level=10, prob=1.)]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
policies = [[
dict(type='Rotate', level=10, prob=1.),
dict(
type='Rotate',
level=8,
max_rotate_angle=90,
center=(0),
img_fill_val=img_fill_val)
]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
| 6,600 | 37.156069 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_transform/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.core.mask import BitmapMasks, PolygonMasks
def _check_fields(results, pipeline_results, keys):
"""Check data in fields from two results are same."""
for key in keys:
if isinstance(results[key], (BitmapMasks, PolygonMasks)):
assert np.equal(results[key].to_ndarray(),
pipeline_results[key].to_ndarray()).all()
else:
assert np.equal(results[key], pipeline_results[key]).all()
assert results[key].dtype == pipeline_results[key].dtype
def check_result_same(results, pipeline_results):
"""Check whether the `pipeline_results` is the same with the predefined
`results`.
Args:
results (dict): Predefined results which should be the standard output
of the transform pipeline.
pipeline_results (dict): Results processed by the transform pipeline.
"""
# check image
_check_fields(results, pipeline_results,
results.get('img_fields', ['img']))
# check bboxes
_check_fields(results, pipeline_results, results.get('bbox_fields', []))
# check masks
_check_fields(results, pipeline_results, results.get('mask_fields', []))
# check segmentations
_check_fields(results, pipeline_results, results.get('seg_fields', []))
# check gt_labels
if 'gt_labels' in results:
assert np.equal(results['gt_labels'],
pipeline_results['gt_labels']).all()
def construct_toy_data(poly2mask=True):
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
# image
results['img'] = img
results['img_shape'] = img.shape
results['img_fields'] = ['img']
# bboxes
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
results['gt_bboxes'] = np.array([[0., 0., 2., 1.]], dtype=np.float32)
results['gt_bboxes_ignore'] = np.array([[2., 0., 3., 1.]],
dtype=np.float32)
# labels
results['gt_labels'] = np.array([1], dtype=np.int64)
# masks
results['mask_fields'] = ['gt_masks']
if poly2mask:
gt_masks = np.array([[0, 1, 1, 0], [0, 1, 0, 0]],
dtype=np.uint8)[None, :, :]
results['gt_masks'] = BitmapMasks(gt_masks, 2, 4)
else:
        raw_masks = [[np.array([0, 0, 2, 0, 2, 1, 0, 1], dtype=np.float64)]]
results['gt_masks'] = PolygonMasks(raw_masks, 2, 4)
# segmentations
results['seg_fields'] = ['gt_semantic_seg']
results['gt_semantic_seg'] = img[..., 0]
return results
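# Sampling the top-left corner in [0, 0.5) and the bottom-right corner in
# [0.5, 1) guarantees x1 <= x2 and y1 <= y2 for every generated bbox.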
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
| 3,054 | 37.670886 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_transform/test_img_augment.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import mmcv
import numpy as np
from mmcv.utils import build_from_cfg
from numpy.testing import assert_array_equal
from mmdet.datasets.builder import PIPELINES
from .utils import construct_toy_data
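# These tests cross-check mmcv's color augmentations against reference
# implementations built on PIL, allowing at most 1 intensity level of
# difference where the two libraries round differently.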
def test_adjust_color():
results = construct_toy_data()
    # test without aug
transform = dict(type='ColorTransform', prob=0, level=10)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test with factor 1
img = results['img']
transform = dict(type='ColorTransform', prob=1, level=10)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], img)
# test with factor 0
transform_module.factor = 0
img_gray = mmcv.bgr2gray(img.copy())
img_r = np.stack([img_gray, img_gray, img_gray], axis=-1)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], img_r)
# test with factor 0.5
transform_module.factor = 0.5
results_transformed = transform_module(copy.deepcopy(results))
img = results['img']
assert_array_equal(
results_transformed['img'],
np.round(np.clip((img * 0.5 + img_r * 0.5), 0, 255)).astype(img.dtype))
def test_imequalize(nb_rand_test=100):
def _imequalize(img):
# equalize the image using PIL.ImageOps.equalize
from PIL import ImageOps, Image
img = Image.fromarray(img)
equalized_img = np.asarray(ImageOps.equalize(img))
return equalized_img
results = construct_toy_data()
    # test without aug
transform = dict(type='EqualizeTransform', prob=0)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
    # test equalize edge case where the histogram step is 0 (image unchanged)
transform = dict(type='EqualizeTransform', prob=1.)
transform_module = build_from_cfg(transform, PIPELINES)
img = np.array([[0, 0, 0], [120, 120, 120], [255, 255, 255]],
dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results['img'] = img
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], img)
# test equalize with randomly sampled image.
for _ in range(nb_rand_test):
img = np.clip(np.random.uniform(0, 1, (1000, 1200, 3)) * 260, 0,
255).astype(np.uint8)
results['img'] = img
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], _imequalize(img))
def test_adjust_brightness(nb_rand_test=100):
def _adjust_brightness(img, factor):
# adjust the brightness of image using
# PIL.ImageEnhance.Brightness
from PIL.ImageEnhance import Brightness
from PIL import Image
img = Image.fromarray(img)
brightened_img = Brightness(img).enhance(factor)
return np.asarray(brightened_img)
results = construct_toy_data()
    # test without aug
transform = dict(type='BrightnessTransform', level=10, prob=0)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test case with factor 1.0
transform = dict(type='BrightnessTransform', level=10, prob=1.)
transform_module = build_from_cfg(transform, PIPELINES)
transform_module.factor = 1.0
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test case with factor 0.0
transform_module.factor = 0.0
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'],
np.zeros_like(results['img']))
# test with randomly sampled images and factors.
for _ in range(nb_rand_test):
img = np.clip(np.random.uniform(0, 1, (1000, 1200, 3)) * 260, 0,
255).astype(np.uint8)
factor = np.random.uniform()
transform_module.factor = factor
results['img'] = img
np.testing.assert_allclose(
transform_module(copy.deepcopy(results))['img'].astype(np.int32),
_adjust_brightness(img, factor).astype(np.int32),
rtol=0,
atol=1)
def test_adjust_contrast(nb_rand_test=100):
def _adjust_contrast(img, factor):
from PIL.ImageEnhance import Contrast
from PIL import Image
        # Image.fromarray expects RGB by default, not BGR.
# convert from BGR to RGB
img = Image.fromarray(img[..., ::-1], mode='RGB')
contrasted_img = Contrast(img).enhance(factor)
# convert from RGB to BGR
return np.asarray(contrasted_img)[..., ::-1]
results = construct_toy_data()
    # test without aug
transform = dict(type='ContrastTransform', level=10, prob=0)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test case with factor 1.0
transform = dict(type='ContrastTransform', level=10, prob=1.)
transform_module = build_from_cfg(transform, PIPELINES)
transform_module.factor = 1.0
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test case with factor 0.0
transform_module.factor = 0.0
results_transformed = transform_module(copy.deepcopy(results))
np.testing.assert_allclose(
results_transformed['img'],
_adjust_contrast(results['img'], 0.),
rtol=0,
atol=1)
# test adjust_contrast with randomly sampled images and factors.
for _ in range(nb_rand_test):
img = np.clip(np.random.uniform(0, 1, (1200, 1000, 3)) * 260, 0,
255).astype(np.uint8)
factor = np.random.uniform()
transform_module.factor = factor
results['img'] = img
results_transformed = transform_module(copy.deepcopy(results))
        # Note the gap (at most 1) between PIL.ImageEnhance.Contrast and
        # mmcv.adjust_contrast comes from the different color-to-gray
        # conversions used by mmcv and PIL.
np.testing.assert_allclose(
transform_module(copy.deepcopy(results))['img'].astype(np.int32),
_adjust_contrast(results['img'], factor).astype(np.int32),
rtol=0,
atol=1)
| 6,903 | 38.451429 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_transform/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import check_result_same, construct_toy_data, create_random_bboxes
__all__ = ['create_random_bboxes', 'construct_toy_data', 'check_result_same']
| 206 | 40.4 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_transform/test_models_aug_test.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import torch
from mmcv.parallel import collate
from mmcv.utils import build_from_cfg
from mmdet.datasets.builder import PIPELINES
from mmdet.models import build_detector
def model_aug_test_template(cfg_file):
# get config
cfg = mmcv.Config.fromfile(cfg_file)
# init model
cfg.model.pretrained = None
cfg.model.train_cfg = None
model = build_detector(cfg.model)
# init test pipeline and set aug test
load_cfg, multi_scale_cfg = cfg.test_pipeline
multi_scale_cfg['flip'] = True
multi_scale_cfg['flip_direction'] = ['horizontal', 'vertical', 'diagonal']
multi_scale_cfg['img_scale'] = [(1333, 800), (800, 600), (640, 480)]
load = build_from_cfg(load_cfg, PIPELINES)
transform = build_from_cfg(multi_scale_cfg, PIPELINES)
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
results = transform(load(results))
assert len(results['img']) == 12
assert len(results['img_metas']) == 12
results['img'] = [collate([x]) for x in results['img']]
results['img_metas'] = [collate([x]).data[0] for x in results['img_metas']]
# aug test the model
model.eval()
with torch.no_grad():
aug_result = model(return_loss=False, rescale=True, **results)
return aug_result
def test_aug_test_size():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
# Define simple pipeline
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
# get config
transform = dict(
type='MultiScaleFlipAug',
transforms=[],
img_scale=[(1333, 800), (800, 600), (640, 480)],
flip=True,
flip_direction=['horizontal', 'vertical', 'diagonal'])
multi_aug_test_module = build_from_cfg(transform, PIPELINES)
    results = load(results)
    results = multi_aug_test_module(results)
# len(["original", "horizontal", "vertical", "diagonal"]) *
# len([(1333, 800), (800, 600), (640, 480)])
assert len(results['img']) == 12
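    # In general, MultiScaleFlipAug yields
    # len(img_scale) * (1 + len(flip_direction)) views when flip=True,
    # i.e. 3 * (1 + 3) = 12 here.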
def test_cascade_rcnn_aug_test():
aug_result = model_aug_test_template(
'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 80
def test_mask_rcnn_aug_test():
aug_result = model_aug_test_template(
'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_htc_aug_test():
aug_result = model_aug_test_template('configs/htc/htc_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_scnet_aug_test():
aug_result = model_aug_test_template(
'configs/scnet/scnet_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_cornernet_aug_test():
# get config
cfg = mmcv.Config.fromfile(
'configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py')
# init model
cfg.model.pretrained = None
cfg.model.train_cfg = None
model = build_detector(cfg.model)
# init test pipeline and set aug test
load_cfg, multi_scale_cfg = cfg.test_pipeline
multi_scale_cfg['flip'] = True
multi_scale_cfg['flip_direction'] = ['horizontal', 'vertical', 'diagonal']
multi_scale_cfg['scale_factor'] = [0.5, 1.0, 2.0]
load = build_from_cfg(load_cfg, PIPELINES)
transform = build_from_cfg(multi_scale_cfg, PIPELINES)
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
results = transform(load(results))
assert len(results['img']) == 12
assert len(results['img_metas']) == 12
results['img'] = [collate([x]) for x in results['img']]
results['img_metas'] = [collate([x]).data[0] for x in results['img_metas']]
# aug test the model
model.eval()
with torch.no_grad():
aug_result = model(return_loss=False, rescale=True, **results)
assert len(aug_result[0]) == 80
| 4,314 | 31.689394 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_transform/test_translate.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pycocotools.mask as maskUtils
import pytest
from mmcv.utils import build_from_cfg
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import PIPELINES
def _check_keys(results, results_translated):
assert len(set(results.keys()).difference(set(
results_translated.keys()))) == 0
assert len(set(results_translated.keys()).difference(set(
results.keys()))) == 0
def _pad(h, w, c, pad_val, axis=-1, dtype=np.float32):
assert isinstance(pad_val, (int, float, tuple))
if isinstance(pad_val, (int, float)):
pad_val = tuple([pad_val] * c)
assert len(pad_val) == c
pad_data = np.stack([np.ones((h, w)) * pad_val[i] for i in range(c)],
axis=axis).astype(dtype)
return pad_data
def _construct_img(results):
h, w = results['img_info']['height'], results['img_info']['width']
img = np.random.uniform(0, 1, (h, w, 3)) * 255
img = img.astype(np.uint8)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
results['img_fields'] = ['img']
def _construct_ann_info(h=427, w=640, c=3):
bboxes = np.array(
[[222.62, 217.82, 241.81, 238.93], [50.5, 329.7, 130.23, 384.96],
[175.47, 331.97, 254.8, 389.26]],
dtype=np.float32)
labels = np.array([9, 2, 2], dtype=np.int64)
bboxes_ignore = np.array([[59., 253., 311., 337.]], dtype=np.float32)
masks = [
[[222.62, 217.82, 222.62, 238.93, 241.81, 238.93, 240.85, 218.78]],
[[
69.19, 332.17, 82.39, 330.25, 97.24, 329.7, 114.01, 331.35, 116.76,
337.39, 119.78, 343.17, 128.03, 344.54, 128.86, 347.84, 124.18,
350.59, 129.96, 358.01, 130.23, 366.54, 129.13, 377.81, 125.28,
382.48, 119.78, 381.93, 117.31, 377.54, 116.21, 379.46, 114.83,
382.21, 107.14, 383.31, 105.49, 378.36, 77.99, 377.54, 75.79,
381.11, 69.74, 381.93, 66.72, 378.91, 65.07, 377.81, 63.15, 379.19,
62.32, 383.31, 52.7, 384.96, 50.5, 379.46, 51.32, 375.61, 51.6,
370.11, 51.6, 364.06, 53.52, 354.99, 56.27, 344.54, 59.57, 336.29,
66.45, 332.72
]],
[[
175.47, 386.86, 175.87, 376.44, 177.08, 351.2, 189.1, 332.77,
194.31, 331.97, 236.37, 332.77, 244.79, 342.39, 246.79, 346.79,
248.39, 345.99, 251.6, 345.59, 254.8, 348.0, 254.8, 351.6, 250.0,
352.0, 250.0, 354.81, 251.6, 358.41, 251.6, 364.42, 251.6, 370.03,
252.8, 378.04, 252.8, 384.05, 250.8, 387.26, 246.39, 387.66,
245.19, 386.46, 242.38, 388.86, 233.97, 389.26, 232.77, 388.06,
232.77, 383.65, 195.91, 381.25, 195.91, 384.86, 191.1, 384.86,
187.49, 385.26, 186.69, 382.85, 184.29, 382.45, 183.09, 387.26,
178.68, 388.46, 176.28, 387.66
]]
]
return dict(
bboxes=bboxes, labels=labels, bboxes_ignore=bboxes_ignore, masks=masks)
def _load_bboxes(results):
ann_info = results['ann_info']
results['gt_bboxes'] = ann_info['bboxes'].copy()
results['bbox_fields'] = ['gt_bboxes']
gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
if gt_bboxes_ignore is not None:
results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
results['bbox_fields'].append('gt_bboxes_ignore')
def _load_labels(results):
results['gt_labels'] = results['ann_info']['labels'].copy()
def _poly2mask(mask_ann, img_h, img_w):
if isinstance(mask_ann, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
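# Hypothetical self-check for the helper above (an illustrative sketch,
# assuming standard pycocotools behaviour): a single axis-aligned square
# polygon decodes into a binary uint8 mask of the requested size.
def _poly2mask_demo():
    demo_mask = _poly2mask([[2., 2., 6., 2., 6., 6., 2., 6.]], 10, 10)
    assert demo_mask.shape == (10, 10)
    assert demo_mask.dtype == np.uint8
    assert 0 < demo_mask.sum() < 10 * 10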
def _process_polygons(polygons):
polygons = [np.array(p) for p in polygons]
valid_polygons = []
for polygon in polygons:
if len(polygon) % 2 == 0 and len(polygon) >= 6:
valid_polygons.append(polygon)
return valid_polygons
def _load_masks(results, poly2mask=True):
h, w = results['img_info']['height'], results['img_info']['width']
gt_masks = results['ann_info']['masks']
if poly2mask:
gt_masks = BitmapMasks([_poly2mask(mask, h, w) for mask in gt_masks],
h, w)
else:
gt_masks = PolygonMasks(
[_process_polygons(polygons) for polygons in gt_masks], h, w)
results['gt_masks'] = gt_masks
results['mask_fields'] = ['gt_masks']
def _construct_semantic_seg(results):
h, w = results['img_info']['height'], results['img_info']['width']
seg_toy = (np.random.uniform(0, 1, (h, w)) * 255).astype(np.uint8)
results['gt_semantic_seg'] = seg_toy
results['seg_fields'] = ['gt_semantic_seg']
def construct_toy_data(poly2mask=True):
img_info = dict(height=427, width=640)
ann_info = _construct_ann_info(h=img_info['height'], w=img_info['width'])
results = dict(img_info=img_info, ann_info=ann_info)
# construct image, similar to 'LoadImageFromFile'
_construct_img(results)
# 'LoadAnnotations' (bboxes, labels, masks, semantic_seg)
_load_bboxes(results)
_load_labels(results)
_load_masks(results, poly2mask)
_construct_semantic_seg(results)
return results
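# construct_toy_data mimics what 'LoadImageFromFile' + 'LoadAnnotations'
# would produce for a 427x640 image, so the transforms below can be
# unit-tested without running the real data-loading pipeline.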
def test_translate():
# test assertion for invalid value of level
with pytest.raises(AssertionError):
transform = dict(type='Translate', level=-1)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid type of level
with pytest.raises(AssertionError):
transform = dict(type='Translate', level=[1])
build_from_cfg(transform, PIPELINES)
# test assertion for invalid prob
with pytest.raises(AssertionError):
transform = dict(type='Translate', level=1, prob=-0.5)
build_from_cfg(transform, PIPELINES)
# test assertion for the num of elements in tuple img_fill_val
with pytest.raises(AssertionError):
transform = dict(
type='Translate', level=1, img_fill_val=(128, 128, 128, 128))
build_from_cfg(transform, PIPELINES)
# test ValueError for invalid type of img_fill_val
with pytest.raises(ValueError):
transform = dict(
type='Translate', level=1, img_fill_val=[128, 128, 128])
build_from_cfg(transform, PIPELINES)
# test assertion for invalid value of img_fill_val
with pytest.raises(AssertionError):
transform = dict(
type='Translate', level=1, img_fill_val=(128, -1, 256))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid value of direction
with pytest.raises(AssertionError):
transform = dict(
type='Translate', level=1, img_fill_val=128, direction='diagonal')
build_from_cfg(transform, PIPELINES)
# test assertion for invalid type of max_translate_offset
with pytest.raises(AssertionError):
transform = dict(
type='Translate',
level=1,
img_fill_val=128,
max_translate_offset=(250., ))
build_from_cfg(transform, PIPELINES)
# construct toy data example for unit test
results = construct_toy_data()
def _check_bbox_mask(results,
results_translated,
offset,
direction,
min_size=0.):
# The key correspondence from bboxes to labels and masks.
bbox2label = {
'gt_bboxes': 'gt_labels',
'gt_bboxes_ignore': 'gt_labels_ignore'
}
bbox2mask = {
'gt_bboxes': 'gt_masks',
'gt_bboxes_ignore': 'gt_masks_ignore'
}
def _translate_bbox(bboxes, offset, direction, max_h, max_w):
if direction == 'horizontal':
bboxes[:, 0::2] = bboxes[:, 0::2] + offset
elif direction == 'vertical':
bboxes[:, 1::2] = bboxes[:, 1::2] + offset
else:
raise ValueError
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, max_w)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, max_h)
return bboxes
h, w, c = results_translated['img'].shape
for key in results_translated.get('bbox_fields', []):
label_key, mask_key = bbox2label[key], bbox2mask[key]
# check length of key
if label_key in results:
assert len(results_translated[key]) == len(
results_translated[label_key])
if mask_key in results:
assert len(results_translated[key]) == len(
results_translated[mask_key])
# construct gt_bboxes
gt_bboxes = _translate_bbox(
copy.deepcopy(results[key]), offset, direction, h, w)
valid_inds = (gt_bboxes[:, 2] - gt_bboxes[:, 0] > min_size) & (
gt_bboxes[:, 3] - gt_bboxes[:, 1] > min_size)
gt_bboxes = gt_bboxes[valid_inds]
# check bbox
assert np.equal(gt_bboxes, results_translated[key]).all()
# construct gt_masks
if mask_key not in results:
# e.g. 'gt_masks_ignore'
continue
masks, masks_translated = results[mask_key].to_ndarray(
), results_translated[mask_key].to_ndarray()
assert masks.dtype == masks_translated.dtype
if direction == 'horizontal':
masks_pad = _pad(
h,
abs(offset),
masks.shape[0],
0,
axis=0,
dtype=masks.dtype)
if offset <= 0:
# left shift
gt_masks = np.concatenate(
(masks[:, :, -offset:], masks_pad), axis=-1)
else:
# right shift
gt_masks = np.concatenate(
(masks_pad, masks[:, :, :-offset]), axis=-1)
else:
masks_pad = _pad(
abs(offset),
w,
masks.shape[0],
0,
axis=0,
dtype=masks.dtype)
if offset <= 0:
# top shift
gt_masks = np.concatenate(
(masks[:, -offset:, :], masks_pad), axis=1)
else:
# bottom shift
gt_masks = np.concatenate(
(masks_pad, masks[:, :-offset, :]), axis=1)
gt_masks = gt_masks[valid_inds]
# check masks
assert np.equal(gt_masks, masks_translated).all()
def _check_img_seg(results, results_translated, keys, offset, fill_val,
direction):
for key in keys:
assert isinstance(results_translated[key], type(results[key]))
# assert type(results[key]) == type(results_translated[key])
data, data_translated = results[key], results_translated[key]
if 'mask' in key:
data, data_translated = data.to_ndarray(
), data_translated.to_ndarray()
assert data.dtype == data_translated.dtype
if 'img' in key:
data, data_translated = data.transpose(
(2, 0, 1)), data_translated.transpose((2, 0, 1))
elif 'seg' in key:
data, data_translated = data[None, :, :], data_translated[
None, :, :]
c, h, w = data.shape
if direction == 'horizontal':
data_pad = _pad(
h, abs(offset), c, fill_val, axis=0, dtype=data.dtype)
if offset <= 0:
# left shift
data_gt = np.concatenate((data[:, :, -offset:], data_pad),
axis=-1)
else:
# right shift
data_gt = np.concatenate((data_pad, data[:, :, :-offset]),
axis=-1)
else:
data_pad = _pad(
abs(offset), w, c, fill_val, axis=0, dtype=data.dtype)
if offset <= 0:
# top shift
data_gt = np.concatenate((data[:, -offset:, :], data_pad),
axis=1)
else:
# bottom shift
data_gt = np.concatenate((data_pad, data[:, :-offset, :]),
axis=1)
if 'mask' in key:
# TODO assertion here. ``data_translated`` must be a subset
# (or equal) of ``data_gt``
pass
else:
assert np.equal(data_gt, data_translated).all()
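    # Both helpers above rebuild the expected output by hand: translating
    # by k pixels is equivalent to cropping the kept region and
    # concatenating a constant pad of width |k| on the vacated side.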
def check_translate(results,
results_translated,
offset,
img_fill_val,
seg_ignore_label,
direction,
min_size=0):
# check keys
_check_keys(results, results_translated)
# check image
_check_img_seg(results, results_translated,
results.get('img_fields', ['img']), offset,
img_fill_val, direction)
# check segmentation map
_check_img_seg(results, results_translated,
results.get('seg_fields', []), offset, seg_ignore_label,
direction)
# check masks and bboxes
_check_bbox_mask(results, results_translated, offset, direction,
min_size)
# test case when level=0 (without translate aug)
img_fill_val = (104, 116, 124)
seg_ignore_label = 255
transform = dict(
type='Translate',
level=0,
prob=1.0,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label)
translate_module = build_from_cfg(transform, PIPELINES)
results_wo_translate = translate_module(copy.deepcopy(results))
check_translate(
copy.deepcopy(results),
results_wo_translate,
0,
img_fill_val,
seg_ignore_label,
'horizontal',
)
# test case when level>0 and translate horizontally (left shift).
transform = dict(
type='Translate',
level=8,
prob=1.0,
img_fill_val=img_fill_val,
random_negative_prob=1.0,
seg_ignore_label=seg_ignore_label)
translate_module = build_from_cfg(transform, PIPELINES)
offset = translate_module.offset
results_translated = translate_module(copy.deepcopy(results))
check_translate(
copy.deepcopy(results),
results_translated,
-offset,
img_fill_val,
seg_ignore_label,
'horizontal',
)
# test case when level>0 and translate horizontally (right shift).
translate_module.random_negative_prob = 0.0
results_translated = translate_module(copy.deepcopy(results))
check_translate(
copy.deepcopy(results),
results_translated,
offset,
img_fill_val,
seg_ignore_label,
'horizontal',
)
# test case when level>0 and translate vertically (top shift).
transform = dict(
type='Translate',
level=10,
prob=1.0,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label,
random_negative_prob=1.0,
direction='vertical')
translate_module = build_from_cfg(transform, PIPELINES)
offset = translate_module.offset
results_translated = translate_module(copy.deepcopy(results))
check_translate(
copy.deepcopy(results), results_translated, -offset, img_fill_val,
seg_ignore_label, 'vertical')
# test case when level>0 and translate vertically (bottom shift).
translate_module.random_negative_prob = 0.0
results_translated = translate_module(copy.deepcopy(results))
check_translate(
copy.deepcopy(results), results_translated, offset, img_fill_val,
seg_ignore_label, 'vertical')
# test case when no translation is called (prob<=0)
transform = dict(
type='Translate',
level=8,
prob=0.0,
img_fill_val=img_fill_val,
random_negative_prob=0.0,
seg_ignore_label=seg_ignore_label)
translate_module = build_from_cfg(transform, PIPELINES)
results_translated = translate_module(copy.deepcopy(results))
# test translate vertically with PolygonMasks (top shift)
results = construct_toy_data(False)
transform = dict(
type='Translate',
level=10,
prob=1.0,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label,
direction='vertical')
translate_module = build_from_cfg(transform, PIPELINES)
offset = translate_module.offset
translate_module.random_negative_prob = 1.0
results_translated = translate_module(copy.deepcopy(results))
def _translated_gt(masks, direction, offset, out_shape):
translated_masks = []
for poly_per_obj in masks:
translated_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
if direction == 'horizontal':
p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
elif direction == 'vertical':
p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
if PolygonMasks([[p]], *out_shape).areas[0] > 0:
# filter invalid (area=0)
translated_poly_per_obj.append(p)
if len(translated_poly_per_obj):
translated_masks.append(translated_poly_per_obj)
translated_masks = PolygonMasks(translated_masks, *out_shape)
return translated_masks
h, w = results['img_shape'][:2]
for key in results.get('mask_fields', []):
masks = results[key]
translated_gt = _translated_gt(masks, 'vertical', -offset, (h, w))
assert np.equal(results_translated[key].to_ndarray(),
translated_gt.to_ndarray()).all()
# test translate horizontally with PolygonMasks (right shift)
results = construct_toy_data(False)
transform = dict(
type='Translate',
level=8,
prob=1.0,
img_fill_val=img_fill_val,
random_negative_prob=0.0,
seg_ignore_label=seg_ignore_label)
translate_module = build_from_cfg(transform, PIPELINES)
offset = translate_module.offset
results_translated = translate_module(copy.deepcopy(results))
h, w = results['img_shape'][:2]
for key in results.get('mask_fields', []):
masks = results[key]
translated_gt = _translated_gt(masks, 'horizontal', offset, (h, w))
assert np.equal(results_translated[key].to_ndarray(),
translated_gt.to_ndarray()).all()
# test AutoAugment equipped with Translate
policies = [[dict(type='Translate', level=10, prob=1.)]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
policies = [[
dict(type='Translate', level=10, prob=1.),
dict(
type='Translate',
level=8,
img_fill_val=img_fill_val,
direction='vertical')
]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
| 19,985 | 37.65764 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_data/test_pipelines/test_transform/test_shear.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pytest
from mmcv.utils import build_from_cfg
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import PIPELINES
from .utils import check_result_same, construct_toy_data
def test_shear():
# test assertion for invalid type of max_shear_magnitude
with pytest.raises(AssertionError):
transform = dict(type='Shear', level=1, max_shear_magnitude=(0.5, ))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid value of max_shear_magnitude
with pytest.raises(AssertionError):
transform = dict(type='Shear', level=2, max_shear_magnitude=1.2)
build_from_cfg(transform, PIPELINES)
# test ValueError for invalid type of img_fill_val
with pytest.raises(ValueError):
transform = dict(type='Shear', level=2, img_fill_val=[128])
build_from_cfg(transform, PIPELINES)
results = construct_toy_data()
# test case when no shear aug (level=0, direction='horizontal')
img_fill_val = (104, 116, 124)
seg_ignore_label = 255
transform = dict(
type='Shear',
level=0,
prob=1.,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label,
direction='horizontal')
shear_module = build_from_cfg(transform, PIPELINES)
results_wo_shear = shear_module(copy.deepcopy(results))
check_result_same(results, results_wo_shear)
# test case when no shear aug (level=0, direction='vertical')
transform = dict(
type='Shear',
level=0,
prob=1.,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label,
direction='vertical')
shear_module = build_from_cfg(transform, PIPELINES)
results_wo_shear = shear_module(copy.deepcopy(results))
check_result_same(results, results_wo_shear)
# test case when no shear aug (prob<=0)
transform = dict(
type='Shear',
level=10,
prob=0.,
img_fill_val=img_fill_val,
direction='vertical')
shear_module = build_from_cfg(transform, PIPELINES)
results_wo_shear = shear_module(copy.deepcopy(results))
check_result_same(results, results_wo_shear)
# test shear horizontally, magnitude=1
transform = dict(
type='Shear',
level=10,
prob=1.,
img_fill_val=img_fill_val,
direction='horizontal',
max_shear_magnitude=1.,
random_negative_prob=0.)
shear_module = build_from_cfg(transform, PIPELINES)
results_sheared = shear_module(copy.deepcopy(results))
results_gt = copy.deepcopy(results)
img_s = np.array([[1, 2, 3, 4], [0, 5, 6, 7]], dtype=np.uint8)
img_s = np.stack([img_s, img_s, img_s], axis=-1)
img_s[1, 0, :] = np.array(img_fill_val)
results_gt['img'] = img_s
results_gt['gt_bboxes'] = np.array([[0., 0., 3., 1.]], dtype=np.float32)
results_gt['gt_bboxes_ignore'] = np.array([[2., 0., 4., 1.]],
dtype=np.float32)
gt_masks = np.array([[0, 1, 1, 0], [0, 0, 1, 0]],
dtype=np.uint8)[None, :, :]
results_gt['gt_masks'] = BitmapMasks(gt_masks, 2, 4)
results_gt['gt_semantic_seg'] = np.array(
[[1, 2, 3, 4], [255, 5, 6, 7]], dtype=results['gt_semantic_seg'].dtype)
check_result_same(results_gt, results_sheared)
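    # For reference: a horizontal shear of magnitude m maps (x, y) to
    # (x + m * y, y), so with m=1 the second row of the 2x4 toy image
    # shifts right by one pixel, which is what img_s encodes above.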
# test PolygonMasks with shear horizontally, magnitude=1
results = construct_toy_data(poly2mask=False)
results_sheared = shear_module(copy.deepcopy(results))
    gt_masks = [[np.array([0, 0, 2, 0, 3, 1, 1, 1], dtype=np.float64)]]
results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4)
check_result_same(results_gt, results_sheared)
# test shear vertically, magnitude=-1
img_fill_val = 128
results = construct_toy_data()
transform = dict(
type='Shear',
level=10,
prob=1.,
img_fill_val=img_fill_val,
direction='vertical',
max_shear_magnitude=1.,
random_negative_prob=1.)
shear_module = build_from_cfg(transform, PIPELINES)
results_sheared = shear_module(copy.deepcopy(results))
results_gt = copy.deepcopy(results)
img_s = np.array([[1, 6, img_fill_val, img_fill_val],
[5, img_fill_val, img_fill_val, img_fill_val]],
dtype=np.uint8)
img_s = np.stack([img_s, img_s, img_s], axis=-1)
results_gt['img'] = img_s
results_gt['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)
results_gt['gt_labels'] = np.empty((0, ), dtype=np.int64)
results_gt['gt_bboxes_ignore'] = np.empty((0, 4), dtype=np.float32)
gt_masks = np.array([[0, 1, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)[None, :, :]
results_gt['gt_masks'] = BitmapMasks(gt_masks, 2, 4)
results_gt['gt_semantic_seg'] = np.array(
[[1, 6, 255, 255], [5, 255, 255, 255]],
dtype=results['gt_semantic_seg'].dtype)
check_result_same(results_gt, results_sheared)
# test PolygonMasks with shear vertically, magnitude=-1
results = construct_toy_data(poly2mask=False)
results_sheared = shear_module(copy.deepcopy(results))
    gt_masks = [[np.array([0, 0, 2, 0, 2, 0, 0, 1], dtype=np.float64)]]
results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4)
check_result_same(results_gt, results_sheared)
results = construct_toy_data()
# same mask for BitmapMasks and PolygonMasks
results['gt_masks'] = BitmapMasks(
np.array([[0, 1, 1, 0], [0, 1, 1, 0]], dtype=np.uint8)[None, :, :], 2,
4)
results['gt_bboxes'] = np.array([[1., 0., 2., 1.]], dtype=np.float32)
results_sheared_bitmap = shear_module(copy.deepcopy(results))
check_result_same(results_sheared_bitmap, results_sheared)
# test AutoAugment equipped with Shear
policies = [[dict(type='Shear', level=10, prob=1.)]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
policies = [[
dict(type='Shear', level=10, prob=1.),
dict(
type='Shear',
level=8,
img_fill_val=img_fill_val,
direction='vertical',
max_shear_magnitude=1.)
]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
| 6,481 | 38.284848 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_utils/test_anchor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""
CommandLine:
pytest tests/test_utils/test_anchor.py
xdoctest tests/test_utils/test_anchor.py zero
"""
import pytest
import torch
def test_standard_points_generator():
from mmdet.core.anchor import build_prior_generator
    # test init
anchor_generator_cfg = dict(
type='MlvlPointGenerator', strides=[4, 8], offset=0)
anchor_generator = build_prior_generator(anchor_generator_cfg)
assert anchor_generator is not None
assert anchor_generator.num_base_priors == [1, 1]
# test_stride
from mmdet.core.anchor import MlvlPointGenerator
# Square strides
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
mlvl_points_half_stride_generator = MlvlPointGenerator(
strides=[4, 10], offset=0.5)
assert mlvl_points.num_levels == 2
# assert self.num_levels == len(featmap_sizes)
with pytest.raises(AssertionError):
mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device='cpu')
priors = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cpu')
priors_with_stride = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device='cpu')
assert len(priors) == 2
# assert last dimension is (coord_x, coord_y, stride_w, stride_h).
assert priors_with_stride[0].size(1) == 4
assert priors_with_stride[0][0][2] == 4
assert priors_with_stride[0][0][3] == 4
assert priors_with_stride[1][0][2] == 10
assert priors_with_stride[1][0][3] == 10
stride_4_feat_2_2 = priors[0]
assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4
assert stride_4_feat_2_2.size(0) == 4
assert stride_4_feat_2_2.size(1) == 2
stride_10_feat_4_8 = priors[1]
assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10
assert stride_10_feat_4_8.size(0) == 4 * 8
assert stride_10_feat_4_8.size(1) == 2
# assert the offset of 0.5 * stride
priors_half_offset = mlvl_points_half_stride_generator.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cpu')
assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2
assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2
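    # With offset=0.5 every prior shifts by 0.5 * stride in both x and y,
    # hence the elementwise difference sums to stride * 0.5 * 2.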
if torch.cuda.is_available():
anchor_generator_cfg = dict(
type='MlvlPointGenerator', strides=[4, 8], offset=0)
anchor_generator = build_prior_generator(anchor_generator_cfg)
assert anchor_generator is not None
# Square strides
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
mlvl_points_half_stride_generator = MlvlPointGenerator(
strides=[4, 10], offset=0.5)
assert mlvl_points.num_levels == 2
# assert self.num_levels == len(featmap_sizes)
with pytest.raises(AssertionError):
mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device='cuda')
priors = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cuda')
priors_with_stride = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device='cuda')
assert len(priors) == 2
# assert last dimension is (coord_x, coord_y, stride_w, stride_h).
assert priors_with_stride[0].size(1) == 4
assert priors_with_stride[0][0][2] == 4
assert priors_with_stride[0][0][3] == 4
assert priors_with_stride[1][0][2] == 10
assert priors_with_stride[1][0][3] == 10
stride_4_feat_2_2 = priors[0]
assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4
assert stride_4_feat_2_2.size(0) == 4
assert stride_4_feat_2_2.size(1) == 2
stride_10_feat_4_8 = priors[1]
assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10
assert stride_10_feat_4_8.size(0) == 4 * 8
assert stride_10_feat_4_8.size(1) == 2
# assert the offset of 0.5 * stride
priors_half_offset = mlvl_points_half_stride_generator.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cuda')
assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2
assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2
def test_sparse_prior():
from mmdet.core.anchor import MlvlPointGenerator
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
featmap_sizes = [(3, 5), (6, 4)]
grid_anchors = mlvl_points.grid_priors(
featmap_sizes=featmap_sizes, with_stride=False, device='cpu')
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cpu')
assert not sparse_prior.is_cuda
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cpu')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
from mmdet.core.anchor import AnchorGenerator
mlvl_anchors = AnchorGenerator(
strides=[16, 32], ratios=[1.], scales=[1.], base_sizes=[4, 8])
prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
featmap_sizes = [(3, 5), (6, 4)]
grid_anchors = mlvl_anchors.grid_priors(
featmap_sizes=featmap_sizes, device='cpu')
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cpu')
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cpu')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
# for ssd
from mmdet.core.anchor.anchor_generator import SSDAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = SSDAnchorGenerator(
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32],
ratios=[[2], [2, 3], [2, 3]])
ssd_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
for i in range(len(featmap_sizes)):
sparse_ssd_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cpu')
assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
# for yolo
from mmdet.core.anchor.anchor_generator import YOLOAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = YOLOAnchorGenerator(
strides=[32, 16, 8],
base_sizes=[
[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)],
])
yolo_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
for i in range(len(featmap_sizes)):
sparse_yolo_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cpu')
assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
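    # In every case above, sparse_priors(prior_idxs, ...) must agree with
    # indexing the dense grid directly: priors[level][prior_idxs]. The
    # CUDA branch below repeats the same checks on device tensors.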
if torch.cuda.is_available():
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
prior_indexs = torch.Tensor([0, 3, 4, 5, 6, 7, 1, 2, 4, 5, 6,
9]).long().cuda()
featmap_sizes = [(6, 8), (6, 4)]
grid_anchors = mlvl_points.grid_priors(
featmap_sizes=featmap_sizes, with_stride=False, device='cuda')
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cuda')
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cuda')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
assert sparse_prior.is_cuda
mlvl_anchors = AnchorGenerator(
strides=[16, 32],
ratios=[1., 2.5],
scales=[1., 5.],
base_sizes=[4, 8])
prior_indexs = torch.Tensor([4, 5, 6, 7, 0, 2, 50, 4, 5, 6,
9]).long().cuda()
featmap_sizes = [(13, 5), (16, 4)]
grid_anchors = mlvl_anchors.grid_priors(
featmap_sizes=featmap_sizes, device='cuda')
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cuda')
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cuda')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
# for ssd
from mmdet.core.anchor.anchor_generator import SSDAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = SSDAnchorGenerator(
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32],
ratios=[[2], [2, 3], [2, 3]])
ssd_anchors = anchor_generator.grid_anchors(
featmap_sizes, device='cuda')
for i in range(len(featmap_sizes)):
sparse_ssd_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cuda')
assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
# for yolo
from mmdet.core.anchor.anchor_generator import YOLOAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = YOLOAnchorGenerator(
strides=[32, 16, 8],
base_sizes=[
[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)],
])
yolo_anchors = anchor_generator.grid_anchors(
featmap_sizes, device='cuda')
for i in range(len(featmap_sizes)):
sparse_yolo_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cuda')
assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
def test_standard_anchor_generator():
from mmdet.core.anchor import build_anchor_generator
anchor_generator_cfg = dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8])
anchor_generator = build_anchor_generator(anchor_generator_cfg)
assert anchor_generator.num_base_priors == \
anchor_generator.num_base_anchors
assert anchor_generator.num_base_priors == [3, 3]
assert anchor_generator is not None
def test_strides():
from mmdet.core import AnchorGenerator
# Square strides
self = AnchorGenerator([10], [1.], [1.], [10])
anchors = self.grid_anchors([(2, 2)], device='cpu')
expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
[-5., 5., 5., 15.], [5., 5., 15., 15.]])
assert torch.equal(anchors[0], expected_anchors)
# Different strides in x and y direction
self = AnchorGenerator([(10, 20)], [1.], [1.], [10])
anchors = self.grid_anchors([(2, 2)], device='cpu')
expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
[-5., 15., 5., 25.], [5., 15., 15., 25.]])
assert torch.equal(anchors[0], expected_anchors)
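# Note: with scale 1, ratio 1 and base size 10, every base anchor is a
# 10x10 box centred on its grid point, so the stride alone determines the
# spacing between the expected corners above.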
def test_ssd_anchor_generator():
from mmdet.core.anchor import build_anchor_generator
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
    # min_sizes and max_sizes must be set at the same time
with pytest.raises(AssertionError):
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
min_sizes=[48, 100, 150, 202, 253, 300],
max_sizes=None,
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
build_anchor_generator(anchor_generator_cfg)
    # min_sizes and max_sizes must have the same length
with pytest.raises(AssertionError):
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
min_sizes=[48, 100, 150, 202, 253, 300],
max_sizes=[100, 150, 202, 253],
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
build_anchor_generator(anchor_generator_cfg)
# test setting anchor size manually
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
min_sizes=[48, 100, 150, 202, 253, 304],
max_sizes=[100, 150, 202, 253, 304, 320],
strides=[16, 32, 64, 107, 160, 320],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]])
featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
expected_base_anchors = [
torch.Tensor([[-16.0000, -16.0000, 32.0000, 32.0000],
[-26.6410, -26.6410, 42.6410, 42.6410],
[-25.9411, -8.9706, 41.9411, 24.9706],
[-8.9706, -25.9411, 24.9706, 41.9411],
[-33.5692, -5.8564, 49.5692, 21.8564],
[-5.8564, -33.5692, 21.8564, 49.5692]]),
torch.Tensor([[-34.0000, -34.0000, 66.0000, 66.0000],
[-45.2372, -45.2372, 77.2372, 77.2372],
[-54.7107, -19.3553, 86.7107, 51.3553],
[-19.3553, -54.7107, 51.3553, 86.7107],
[-70.6025, -12.8675, 102.6025, 44.8675],
[-12.8675, -70.6025, 44.8675, 102.6025]]),
torch.Tensor([[-43.0000, -43.0000, 107.0000, 107.0000],
[-55.0345, -55.0345, 119.0345, 119.0345],
[-74.0660, -21.0330, 138.0660, 85.0330],
[-21.0330, -74.0660, 85.0330, 138.0660],
[-97.9038, -11.3013, 161.9038, 75.3013],
[-11.3013, -97.9038, 75.3013, 161.9038]]),
torch.Tensor([[-47.5000, -47.5000, 154.5000, 154.5000],
[-59.5332, -59.5332, 166.5332, 166.5332],
[-89.3356, -17.9178, 196.3356, 124.9178],
[-17.9178, -89.3356, 124.9178, 196.3356],
[-121.4371, -4.8124, 228.4371, 111.8124],
[-4.8124, -121.4371, 111.8124, 228.4371]]),
torch.Tensor([[-46.5000, -46.5000, 206.5000, 206.5000],
[-58.6651, -58.6651, 218.6651, 218.6651],
[-98.8980, -9.4490, 258.8980, 169.4490],
[-9.4490, -98.8980, 169.4490, 258.8980],
[-139.1044, 6.9652, 299.1044, 153.0348],
[6.9652, -139.1044, 153.0348, 299.1044]]),
torch.Tensor([[8.0000, 8.0000, 312.0000, 312.0000],
[4.0513, 4.0513, 315.9487, 315.9487],
[-54.9605, 52.5198, 374.9604, 267.4802],
[52.5198, -54.9605, 267.4802, 374.9604],
[-103.2717, 72.2428, 423.2717, 247.7572],
[72.2428, -103.2717, 247.7572, 423.2717]])
]
base_anchors = anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
# check valid flags
expected_valid_pixels = [2400, 600, 150, 54, 24, 6]
multi_level_valid_flags = anchor_generator.valid_flags(
featmap_sizes, (320, 320), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert anchor_generator.num_base_anchors == [6, 6, 6, 6, 6, 6]
# check anchor generation
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 6
# test vgg ssd anchor setting
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
# check base anchors
expected_base_anchors = [
torch.Tensor([[-6.5000, -6.5000, 14.5000, 14.5000],
[-11.3704, -11.3704, 19.3704, 19.3704],
[-10.8492, -3.4246, 18.8492, 11.4246],
[-3.4246, -10.8492, 11.4246, 18.8492]]),
torch.Tensor([[-14.5000, -14.5000, 30.5000, 30.5000],
[-25.3729, -25.3729, 41.3729, 41.3729],
[-23.8198, -7.9099, 39.8198, 23.9099],
[-7.9099, -23.8198, 23.9099, 39.8198],
[-30.9711, -4.9904, 46.9711, 20.9904],
[-4.9904, -30.9711, 20.9904, 46.9711]]),
torch.Tensor([[-33.5000, -33.5000, 65.5000, 65.5000],
[-45.5366, -45.5366, 77.5366, 77.5366],
[-54.0036, -19.0018, 86.0036, 51.0018],
[-19.0018, -54.0036, 51.0018, 86.0036],
[-69.7365, -12.5788, 101.7365, 44.5788],
[-12.5788, -69.7365, 44.5788, 101.7365]]),
torch.Tensor([[-44.5000, -44.5000, 108.5000, 108.5000],
[-56.9817, -56.9817, 120.9817, 120.9817],
[-76.1873, -22.0937, 140.1873, 86.0937],
[-22.0937, -76.1873, 86.0937, 140.1873],
[-100.5019, -12.1673, 164.5019, 76.1673],
[-12.1673, -100.5019, 76.1673, 164.5019]]),
torch.Tensor([[-53.5000, -53.5000, 153.5000, 153.5000],
[-66.2185, -66.2185, 166.2185, 166.2185],
[-96.3711, -23.1855, 196.3711, 123.1855],
[-23.1855, -96.3711, 123.1855, 196.3711]]),
torch.Tensor([[19.5000, 19.5000, 280.5000, 280.5000],
[6.6342, 6.6342, 293.3658, 293.3658],
[-34.5549, 57.7226, 334.5549, 242.2774],
[57.7226, -34.5549, 242.2774, 334.5549]]),
]
base_anchors = anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
# check valid flags
expected_valid_pixels = [5776, 2166, 600, 150, 36, 4]
multi_level_valid_flags = anchor_generator.valid_flags(
featmap_sizes, (300, 300), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert anchor_generator.num_base_anchors == [4, 6, 6, 6, 4, 4]
# check anchor generation
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 6
def test_anchor_generator_with_tuples():
from mmdet.core.anchor import build_anchor_generator
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
anchor_generator_cfg_tuples = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[(8, 8), (16, 16), (32, 32), (64, 64), (100, 100), (300, 300)],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
anchor_generator_tuples = build_anchor_generator(
anchor_generator_cfg_tuples)
anchors_tuples = anchor_generator_tuples.grid_anchors(
featmap_sizes, device)
for anchor, anchor_tuples in zip(anchors, anchors_tuples):
assert torch.equal(anchor, anchor_tuples)
def test_yolo_anchor_generator():
from mmdet.core.anchor import build_anchor_generator
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
anchor_generator_cfg = dict(
type='YOLOAnchorGenerator',
strides=[32, 16, 8],
base_sizes=[
[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)],
])
featmap_sizes = [(14, 18), (28, 36), (56, 72)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
# check base anchors
expected_base_anchors = [
torch.Tensor([[-42.0000, -29.0000, 74.0000, 61.0000],
[-62.0000, -83.0000, 94.0000, 115.0000],
[-170.5000, -147.0000, 202.5000, 179.0000]]),
torch.Tensor([[-7.0000, -22.5000, 23.0000, 38.5000],
[-23.0000, -14.5000, 39.0000, 30.5000],
[-21.5000, -51.5000, 37.5000, 67.5000]]),
torch.Tensor([[-1.0000, -2.5000, 9.0000, 10.5000],
[-4.0000, -11.0000, 12.0000, 19.0000],
[-12.5000, -7.5000, 20.5000, 15.5000]])
]
base_anchors = anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
# check number of base anchors for each level
assert anchor_generator.num_base_anchors == [3, 3, 3]
# check anchor generation
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 3
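# YOLO base anchors are (w, h) boxes centred at (stride / 2, stride / 2);
# e.g. the (116, 90) anchor at stride 32 becomes
# [16 - 58, 16 - 45, 16 + 58, 16 + 45] = [-42, -29, 74, 61] above.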
def test_retina_anchor():
from mmdet.models import build_head
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
# head configs modified from
# configs/nas_fpn/retinanet_r50_fpn_crop640_50e.py
bbox_head = dict(
type='RetinaSepBNHead',
num_classes=4,
num_ins=5,
in_channels=4,
stacked_convs=1,
feat_channels=4,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]))
retina_head = build_head(bbox_head)
assert retina_head.anchor_generator is not None
# use the featmap sizes in NASFPN setting to test retina head
featmap_sizes = [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
# check base anchors
expected_base_anchors = [
torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
[-28.5088, -14.2544, 28.5088, 14.2544],
[-35.9188, -17.9594, 35.9188, 17.9594],
[-16.0000, -16.0000, 16.0000, 16.0000],
[-20.1587, -20.1587, 20.1587, 20.1587],
[-25.3984, -25.3984, 25.3984, 25.3984],
[-11.3137, -22.6274, 11.3137, 22.6274],
[-14.2544, -28.5088, 14.2544, 28.5088],
[-17.9594, -35.9188, 17.9594, 35.9188]]),
torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
[-57.0175, -28.5088, 57.0175, 28.5088],
[-71.8376, -35.9188, 71.8376, 35.9188],
[-32.0000, -32.0000, 32.0000, 32.0000],
[-40.3175, -40.3175, 40.3175, 40.3175],
[-50.7968, -50.7968, 50.7968, 50.7968],
[-22.6274, -45.2548, 22.6274, 45.2548],
[-28.5088, -57.0175, 28.5088, 57.0175],
[-35.9188, -71.8376, 35.9188, 71.8376]]),
torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
[-114.0350, -57.0175, 114.0350, 57.0175],
[-143.6751, -71.8376, 143.6751, 71.8376],
[-64.0000, -64.0000, 64.0000, 64.0000],
[-80.6349, -80.6349, 80.6349, 80.6349],
[-101.5937, -101.5937, 101.5937, 101.5937],
[-45.2548, -90.5097, 45.2548, 90.5097],
[-57.0175, -114.0350, 57.0175, 114.0350],
[-71.8376, -143.6751, 71.8376, 143.6751]]),
torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
[-228.0701, -114.0350, 228.0701, 114.0350],
[-287.3503, -143.6751, 287.3503, 143.6751],
[-128.0000, -128.0000, 128.0000, 128.0000],
[-161.2699, -161.2699, 161.2699, 161.2699],
[-203.1873, -203.1873, 203.1873, 203.1873],
[-90.5097, -181.0193, 90.5097, 181.0193],
[-114.0350, -228.0701, 114.0350, 228.0701],
[-143.6751, -287.3503, 143.6751, 287.3503]]),
torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
[-456.1401, -228.0701, 456.1401, 228.0701],
[-574.7006, -287.3503, 574.7006, 287.3503],
[-256.0000, -256.0000, 256.0000, 256.0000],
[-322.5398, -322.5398, 322.5398, 322.5398],
[-406.3747, -406.3747, 406.3747, 406.3747],
[-181.0193, -362.0387, 181.0193, 362.0387],
[-228.0701, -456.1401, 228.0701, 456.1401],
[-287.3503, -574.7006, 287.3503, 574.7006]])
]
base_anchors = retina_head.anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
# check valid flags
expected_valid_pixels = [57600, 14400, 3600, 900, 225]
multi_level_valid_flags = retina_head.anchor_generator.valid_flags(
featmap_sizes, (640, 640), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert retina_head.anchor_generator.num_base_anchors == [9, 9, 9, 9, 9]
# check anchor generation
anchors = retina_head.anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 5
def test_guided_anchor():
from mmdet.models import build_head
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
# head configs modified from
# configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py
bbox_head = dict(
type='GARetinaHead',
num_classes=8,
in_channels=4,
stacked_convs=1,
feat_channels=4,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]))
ga_retina_head = build_head(bbox_head)
assert ga_retina_head.approx_anchor_generator is not None
# use the featmap sizes in NASFPN setting to test ga_retina_head
featmap_sizes = [(100, 152), (50, 76), (25, 38), (13, 19), (7, 10)]
# check base anchors
expected_approxs = [
torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
[-28.5088, -14.2544, 28.5088, 14.2544],
[-35.9188, -17.9594, 35.9188, 17.9594],
[-16.0000, -16.0000, 16.0000, 16.0000],
[-20.1587, -20.1587, 20.1587, 20.1587],
[-25.3984, -25.3984, 25.3984, 25.3984],
[-11.3137, -22.6274, 11.3137, 22.6274],
[-14.2544, -28.5088, 14.2544, 28.5088],
[-17.9594, -35.9188, 17.9594, 35.9188]]),
torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
[-57.0175, -28.5088, 57.0175, 28.5088],
[-71.8376, -35.9188, 71.8376, 35.9188],
[-32.0000, -32.0000, 32.0000, 32.0000],
[-40.3175, -40.3175, 40.3175, 40.3175],
[-50.7968, -50.7968, 50.7968, 50.7968],
[-22.6274, -45.2548, 22.6274, 45.2548],
[-28.5088, -57.0175, 28.5088, 57.0175],
[-35.9188, -71.8376, 35.9188, 71.8376]]),
torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
[-114.0350, -57.0175, 114.0350, 57.0175],
[-143.6751, -71.8376, 143.6751, 71.8376],
[-64.0000, -64.0000, 64.0000, 64.0000],
[-80.6349, -80.6349, 80.6349, 80.6349],
[-101.5937, -101.5937, 101.5937, 101.5937],
[-45.2548, -90.5097, 45.2548, 90.5097],
[-57.0175, -114.0350, 57.0175, 114.0350],
[-71.8376, -143.6751, 71.8376, 143.6751]]),
torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
[-228.0701, -114.0350, 228.0701, 114.0350],
[-287.3503, -143.6751, 287.3503, 143.6751],
[-128.0000, -128.0000, 128.0000, 128.0000],
[-161.2699, -161.2699, 161.2699, 161.2699],
[-203.1873, -203.1873, 203.1873, 203.1873],
[-90.5097, -181.0193, 90.5097, 181.0193],
[-114.0350, -228.0701, 114.0350, 228.0701],
[-143.6751, -287.3503, 143.6751, 287.3503]]),
torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
[-456.1401, -228.0701, 456.1401, 228.0701],
[-574.7006, -287.3503, 574.7006, 287.3503],
[-256.0000, -256.0000, 256.0000, 256.0000],
[-322.5398, -322.5398, 322.5398, 322.5398],
[-406.3747, -406.3747, 406.3747, 406.3747],
[-181.0193, -362.0387, 181.0193, 362.0387],
[-228.0701, -456.1401, 228.0701, 456.1401],
[-287.3503, -574.7006, 287.3503, 574.7006]])
]
approxs = ga_retina_head.approx_anchor_generator.base_anchors
for i, base_anchor in enumerate(approxs):
assert base_anchor.allclose(expected_approxs[i])
# check valid flags
expected_valid_pixels = [136800, 34200, 8550, 2223, 630]
multi_level_valid_flags = ga_retina_head.approx_anchor_generator \
.valid_flags(featmap_sizes, (800, 1216), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert ga_retina_head.approx_anchor_generator.num_base_anchors == [
9, 9, 9, 9, 9
]
# check approx generation
squares = ga_retina_head.square_anchor_generator.grid_anchors(
featmap_sizes, device)
assert len(squares) == 5
expected_squares = [
torch.Tensor([[-16., -16., 16., 16.]]),
        torch.Tensor([[-32., -32., 32., 32.]]),
torch.Tensor([[-64., -64., 64., 64.]]),
torch.Tensor([[-128., -128., 128., 128.]]),
torch.Tensor([[-256., -256., 256., 256.]])
]
squares = ga_retina_head.square_anchor_generator.base_anchors
for i, base_anchor in enumerate(squares):
assert base_anchor.allclose(expected_squares[i])
# square_anchor_generator does not check valid flags
# check number of base anchors for each level
assert (ga_retina_head.square_anchor_generator.num_base_anchors == [
1, 1, 1, 1, 1
])
# check square generation
anchors = ga_retina_head.square_anchor_generator.grid_anchors(
featmap_sizes, device)
assert len(anchors) == 5
| 33,155 | 42.171875 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_utils/test_visualization.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pytest
import torch
from mmdet.core import visualization as vis
def test_color():
assert vis.color_val_matplotlib(mmcv.Color.blue) == (0., 0., 1.)
assert vis.color_val_matplotlib('green') == (0., 1., 0.)
assert vis.color_val_matplotlib((1, 2, 3)) == (3 / 255, 2 / 255, 1 / 255)
assert vis.color_val_matplotlib(100) == (100 / 255, 100 / 255, 100 / 255)
    assert vis.color_val_matplotlib(np.zeros(3, dtype=int)) == (0., 0., 0.)
# forbid white color
with pytest.raises(TypeError):
vis.color_val_matplotlib([255, 255, 255])
# forbid float
with pytest.raises(TypeError):
vis.color_val_matplotlib(1.0)
# overflowed
with pytest.raises(AssertionError):
vis.color_val_matplotlib((0, 0, 500))
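    # color_val_matplotlib converts mmcv-style BGR values in [0, 255] into
    # matplotlib-style RGB tuples in [0, 1], which is why (1, 2, 3) maps
    # to (3 / 255, 2 / 255, 1 / 255) above.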
def test_imshow_det_bboxes():
tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
'image.jpg')
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
out_image = vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape
assert not np.allclose(image, out_image)
os.remove(tmp_filename)
# test grayscale images
image = np.ones((10, 10), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
out_image = vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape[:2]
os.remove(tmp_filename)
    # test empty bboxes and labels (shape (0, 4) and (0, ))
image = np.ones((10, 10, 3), np.uint8)
bbox = np.ones((0, 4))
label = np.ones((0, ))
vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test mask
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
segms = np.random.random((2, 10, 10)) > 0.5
segms = np.array(segms, np.int32)
vis.imshow_det_bboxes(
image, bbox, label, segms, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test tensor mask type error
with pytest.raises(AttributeError):
segms = torch.tensor(segms)
vis.imshow_det_bboxes(image, bbox, label, segms, show=False)
def test_imshow_gt_det_bboxes():
tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
'image.jpg')
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
annotation = dict(gt_bboxes=bbox, gt_labels=label)
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
result = [det_result]
out_image = vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape
assert not np.allclose(image, out_image)
os.remove(tmp_filename)
# test grayscale images
image = np.ones((10, 10), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
annotation = dict(gt_bboxes=bbox, gt_labels=label)
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
result = [det_result]
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test numpy mask
gt_mask = np.ones((2, 10, 10))
annotation['gt_masks'] = gt_mask
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test tensor mask
gt_mask = torch.ones((2, 10, 10))
annotation['gt_masks'] = gt_mask
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test unsupported type
annotation['gt_masks'] = []
with pytest.raises(TypeError):
vis.imshow_gt_det_bboxes(image, annotation, result, show=False)
| 4,430 | 33.617188 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_utils/test_general_data.py
|
import copy
import numpy as np
import pytest
import torch
from mmdet.core import GeneralData, InstanceData
def _equal(a, b):
if isinstance(a, (torch.Tensor, np.ndarray)):
return (a == b).all()
else:
return a == b
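# Quick sanity sketch for the helper above (illustrative, not part of the
# original suite): array-likes compare elementwise, everything else via ==.
assert _equal(np.array([1, 2]), np.array([1, 2]))
assert _equal('path', 'path')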
def test_general_data():
# test init
meta_info = dict(
img_size=[256, 256],
path='dadfaff',
scale_factor=np.array([1.5, 1.5]),
img_shape=torch.rand(4))
data = dict(
bboxes=torch.rand(4, 4),
labels=torch.rand(4),
masks=np.random.rand(4, 2, 2))
instance_data = GeneralData(meta_info=meta_info)
assert 'img_size' in instance_data
assert instance_data.img_size == [256, 256]
assert instance_data['img_size'] == [256, 256]
assert 'path' in instance_data
assert instance_data.path == 'dadfaff'
# test nice_repr
repr_instance_data = instance_data.new(data=data)
nice_repr = str(repr_instance_data)
for line in nice_repr.split('\n'):
if 'masks' in line:
assert 'shape' in line
assert '(4, 2, 2)' in line
if 'bboxes' in line:
assert 'shape' in line
assert 'torch.Size([4, 4])' in line
if 'path' in line:
assert 'dadfaff' in line
if 'scale_factor' in line:
assert '[1.5 1.5]' in line
instance_data = GeneralData(
meta_info=meta_info, data=dict(bboxes=torch.rand(5)))
assert 'bboxes' in instance_data
assert len(instance_data.bboxes) == 5
# data should be a dict
with pytest.raises(AssertionError):
GeneralData(data=1)
# test set data
instance_data = GeneralData()
instance_data.set_data(data)
assert 'bboxes' in instance_data
assert len(instance_data.bboxes) == 4
assert 'masks' in instance_data
assert len(instance_data.masks) == 4
# data should be a dict
with pytest.raises(AssertionError):
instance_data.set_data(data=1)
    # test set_meta_info
instance_data = GeneralData()
instance_data.set_meta_info(meta_info)
assert 'img_size' in instance_data
assert instance_data.img_size == [256, 256]
assert instance_data['img_size'] == [256, 256]
assert 'path' in instance_data
assert instance_data.path == 'dadfaff'
    # setting the same values again is allowed and silently skipped
instance_data.set_meta_info(meta_info)
# meta should be a dict
with pytest.raises(AssertionError):
instance_data.set_meta_info(meta_info='fjhka')
    # attributes in `_meta_info_field` are immutable once initialized
instance_data.set_meta_info(meta_info)
# meta should be immutable
with pytest.raises(KeyError):
instance_data.set_meta_info(dict(img_size=[254, 251]))
with pytest.raises(KeyError):
duplicate_meta_info = copy.deepcopy(meta_info)
duplicate_meta_info['path'] = 'dada'
instance_data.set_meta_info(duplicate_meta_info)
with pytest.raises(KeyError):
duplicate_meta_info = copy.deepcopy(meta_info)
duplicate_meta_info['scale_factor'] = np.array([1.5, 1.6])
instance_data.set_meta_info(duplicate_meta_info)
# test new_instance_data
instance_data = GeneralData(meta_info)
new_instance_data = instance_data.new()
for k, v in instance_data.meta_info_items():
assert k in new_instance_data
        assert _equal(v, new_instance_data[k])
instance_data = GeneralData(meta_info, data=data)
temp_meta = copy.deepcopy(meta_info)
temp_data = copy.deepcopy(data)
temp_data['time'] = '12212'
temp_meta['img_norm'] = np.random.random(3)
new_instance_data = instance_data.new(meta_info=temp_meta, data=temp_data)
for k, v in new_instance_data.meta_info_items():
if k in instance_data:
            assert _equal(v, instance_data[k])
else:
assert _equal(v, temp_meta[k])
assert k == 'img_norm'
for k, v in new_instance_data.items():
if k in instance_data:
            assert _equal(v, instance_data[k])
else:
assert k == 'time'
assert _equal(v, temp_data[k])
# test keys
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
assert 'bboxes' in instance_data.keys()
instance_data.b = 10
assert 'b' in instance_data
# test meta keys
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
assert 'path' in instance_data.meta_info_keys()
assert len(instance_data.meta_info_keys()) == len(meta_info)
instance_data.set_meta_info(dict(workdir='fafaf'))
assert 'workdir' in instance_data
assert len(instance_data.meta_info_keys()) == len(meta_info) + 1
# test values
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
assert 10 in instance_data.values()
assert len(instance_data.values()) == 1
# test meta values
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
    # torch 1.3 eq() cannot compare str and tensor
from mmdet import digit_version
if digit_version(torch.__version__) >= [1, 4]:
assert 'dadfaff' in instance_data.meta_info_values()
assert len(instance_data.meta_info_values()) == len(meta_info)
# test items
instance_data = GeneralData(data=data)
for k, v in instance_data.items():
assert k in data
assert _equal(v, data[k])
# test meta_info_items
instance_data = GeneralData(meta_info=meta_info)
for k, v in instance_data.meta_info_items():
assert k in meta_info
assert _equal(v, meta_info[k])
# test __setattr__
new_instance_data = GeneralData(data=data)
new_instance_data.mask = torch.rand(3, 4, 5)
new_instance_data.bboxes = torch.rand(2, 4)
assert 'mask' in new_instance_data
assert len(new_instance_data.mask) == 3
assert len(new_instance_data.bboxes) == 2
# test instance_data_field has been updated
assert 'mask' in new_instance_data._data_fields
assert 'bboxes' in new_instance_data._data_fields
for k in data:
assert k in new_instance_data._data_fields
    # '_meta_info_field' and '_data_fields' are immutable.
with pytest.raises(AttributeError):
new_instance_data._data_fields = None
with pytest.raises(AttributeError):
new_instance_data._meta_info_fields = None
with pytest.raises(AttributeError):
del new_instance_data._data_fields
with pytest.raises(AttributeError):
del new_instance_data._meta_info_fields
    # keys in _meta_info_field are immutable
new_instance_data.set_meta_info(meta_info)
with pytest.raises(KeyError):
del new_instance_data.img_size
with pytest.raises(KeyError):
del new_instance_data.scale_factor
for k in new_instance_data.meta_info_keys():
with pytest.raises(AttributeError):
new_instance_data[k] = None
# test __delattr__
# test key can be removed in instance_data_field
assert 'mask' in new_instance_data._data_fields
assert 'mask' in new_instance_data.keys()
assert 'mask' in new_instance_data
assert hasattr(new_instance_data, 'mask')
del new_instance_data.mask
assert 'mask' not in new_instance_data.keys()
assert 'mask' not in new_instance_data
assert 'mask' not in new_instance_data._data_fields
assert not hasattr(new_instance_data, 'mask')
    # test __delitem__
new_instance_data.mask = torch.rand(1, 2, 3)
assert 'mask' in new_instance_data._data_fields
assert 'mask' in new_instance_data
assert hasattr(new_instance_data, 'mask')
del new_instance_data['mask']
assert 'mask' not in new_instance_data
assert 'mask' not in new_instance_data._data_fields
assert not hasattr(new_instance_data, 'mask')
# test __setitem__
new_instance_data['mask'] = torch.rand(1, 2, 3)
assert 'mask' in new_instance_data._data_fields
assert 'mask' in new_instance_data.keys()
assert hasattr(new_instance_data, 'mask')
# test data_fields has been updated
assert 'mask' in new_instance_data.keys()
assert 'mask' in new_instance_data._data_fields
    # '_meta_info_field' and '_data_fields' are immutable.
with pytest.raises(AttributeError):
del new_instance_data['_data_fields']
with pytest.raises(AttributeError):
del new_instance_data['_meta_info_field']
# test __getitem__
    assert new_instance_data.mask is new_instance_data['mask']
# test get
assert new_instance_data.get('mask') is new_instance_data.mask
assert new_instance_data.get('none_attribute', None) is None
assert new_instance_data.get('none_attribute', 1) == 1
# test pop
mask = new_instance_data.mask
assert new_instance_data.pop('mask') is mask
assert new_instance_data.pop('mask', None) is None
assert new_instance_data.pop('mask', 1) == 1
    # '_meta_info_field' and '_data_fields' are immutable.
with pytest.raises(KeyError):
new_instance_data.pop('_data_fields')
with pytest.raises(KeyError):
new_instance_data.pop('_meta_info_field')
    # attributes in `_meta_info_field` are immutable
with pytest.raises(KeyError):
new_instance_data.pop('img_size')
    # test pop attribute in instance_data_field
new_instance_data['mask'] = torch.rand(1, 2, 3)
new_instance_data.pop('mask')
# test data_field has been updated
assert 'mask' not in new_instance_data
assert 'mask' not in new_instance_data._data_fields
    # test keys
new_instance_data.mask = torch.ones(1, 2, 3)
    assert 'mask' in new_instance_data.keys()
has_flag = False
for key in new_instance_data.keys():
if key == 'mask':
has_flag = True
assert has_flag
# test values
assert len(list(new_instance_data.keys())) == len(
list(new_instance_data.values()))
mask = new_instance_data.mask
has_flag = False
for value in new_instance_data.values():
if value is mask:
has_flag = True
assert has_flag
# test items
assert len(list(new_instance_data.keys())) == len(
list(new_instance_data.items()))
mask = new_instance_data.mask
has_flag = False
for key, value in new_instance_data.items():
if value is mask:
assert key == 'mask'
has_flag = True
assert has_flag
# test device
new_instance_data = GeneralData()
if torch.cuda.is_available():
newnew_instance_data = new_instance_data.new()
devices = ('cpu', 'cuda')
for i in range(10):
device = devices[i % 2]
newnew_instance_data[f'{i}'] = torch.rand(1, 2, 3, device=device)
newnew_instance_data = newnew_instance_data.cpu()
for value in newnew_instance_data.values():
assert not value.is_cuda
newnew_instance_data = new_instance_data.new()
devices = ('cuda', 'cpu')
for i in range(10):
device = devices[i % 2]
newnew_instance_data[f'{i}'] = torch.rand(1, 2, 3, device=device)
newnew_instance_data = newnew_instance_data.cuda()
for value in newnew_instance_data.values():
assert value.is_cuda
# test to
double_instance_data = instance_data.new()
double_instance_data.long = torch.LongTensor(1, 2, 3, 4)
double_instance_data.bool = torch.BoolTensor(1, 2, 3, 4)
    double_instance_data = double_instance_data.to(torch.double)
for k, v in double_instance_data.items():
if isinstance(v, torch.Tensor):
assert v.dtype is torch.double
# test .cpu() .cuda()
if torch.cuda.is_available():
cpu_instance_data = double_instance_data.new()
cpu_instance_data.mask = torch.rand(1)
cuda_tensor = torch.rand(1, 2, 3).cuda()
cuda_instance_data = cpu_instance_data.to(cuda_tensor.device)
for value in cuda_instance_data.values():
assert value.is_cuda
cpu_instance_data = cuda_instance_data.cpu()
for value in cpu_instance_data.values():
assert not value.is_cuda
cuda_instance_data = cpu_instance_data.cuda()
for value in cuda_instance_data.values():
assert value.is_cuda
# test detach
grad_instance_data = double_instance_data.new()
grad_instance_data.mask = torch.rand(2, requires_grad=True)
grad_instance_data.mask_1 = torch.rand(2, requires_grad=True)
detach_instance_data = grad_instance_data.detach()
for value in detach_instance_data.values():
assert not value.requires_grad
# test numpy
tensor_instance_data = double_instance_data.new()
tensor_instance_data.mask = torch.rand(2, requires_grad=True)
tensor_instance_data.mask_1 = torch.rand(2, requires_grad=True)
numpy_instance_data = tensor_instance_data.numpy()
for value in numpy_instance_data.values():
assert isinstance(value, np.ndarray)
if torch.cuda.is_available():
tensor_instance_data = double_instance_data.new()
tensor_instance_data.mask = torch.rand(2)
tensor_instance_data.mask_1 = torch.rand(2)
tensor_instance_data = tensor_instance_data.cuda()
numpy_instance_data = tensor_instance_data.numpy()
for value in numpy_instance_data.values():
assert isinstance(value, np.ndarray)
instance_data['_c'] = 10000
    assert instance_data.get('dad', None) is None
assert hasattr(instance_data, '_c')
del instance_data['_c']
assert not hasattr(instance_data, '_c')
instance_data.a = 1000
instance_data['a'] = 2000
assert instance_data['a'] == 2000
assert instance_data.a == 2000
assert instance_data.get('a') == instance_data['a'] == instance_data.a
instance_data._meta = 1000
assert '_meta' in instance_data.keys()
if torch.cuda.is_available():
instance_data.bbox = torch.ones(2, 3, 4, 5).cuda()
instance_data.score = torch.ones(2, 3, 4, 4)
else:
instance_data.bbox = torch.ones(2, 3, 4, 5)
assert len(instance_data.new().keys()) == 0
with pytest.raises(AttributeError):
instance_data.img_size = 100
for k, v in instance_data.items():
if k == 'bbox':
assert isinstance(v, torch.Tensor)
assert 'a' in instance_data
instance_data.pop('a')
assert 'a' not in instance_data
cpu_instance_data = instance_data.cpu()
for k, v in cpu_instance_data.items():
if isinstance(v, torch.Tensor):
assert not v.is_cuda
assert isinstance(cpu_instance_data.numpy().bbox, np.ndarray)
if torch.cuda.is_available():
        cuda_results = instance_data.cuda()
        for k, v in cuda_results.items():
if isinstance(v, torch.Tensor):
assert v.is_cuda
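# A hedged recap of the contract exercised above (an editorial summary, not
# part of the original test): meta info is write-once, while data fields
# behave like ordinary attributes. For example:
#   results = GeneralData(meta_info=dict(img_shape=(800, 1333)))
#   results.bboxes = torch.rand(5, 4)   # mutable data field
#   results.img_shape = (2, 2)          # raises AttributeError (meta field)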
def test_instance_data():
meta_info = dict(
img_size=(256, 256),
path='dadfaff',
scale_factor=np.array([1.5, 1.5, 1, 1]))
data = dict(
bboxes=torch.rand(4, 4),
masks=torch.rand(4, 2, 2),
labels=np.random.rand(4),
size=[(i, i) for i in range(4)])
# test init
instance_data = InstanceData(meta_info)
assert 'path' in instance_data
instance_data = InstanceData(meta_info, data=data)
assert len(instance_data) == 4
instance_data.set_data(data)
assert len(instance_data) == 4
meta_info = copy.deepcopy(meta_info)
meta_info['img_name'] = 'flag'
    # test new instance_data
new_instance_data = instance_data.new(meta_info=meta_info)
for k, v in new_instance_data.meta_info_items():
if k in instance_data:
_equal(v, instance_data[k])
else:
assert _equal(v, meta_info[k])
assert k == 'img_name'
# meta info is immutable
with pytest.raises(KeyError):
meta_info = copy.deepcopy(meta_info)
meta_info['path'] = 'fdasfdsd'
instance_data.new(meta_info=meta_info)
# data fields should have same length
with pytest.raises(AssertionError):
temp_data = copy.deepcopy(data)
temp_data['bboxes'] = torch.rand(5, 4)
instance_data.new(data=temp_data)
temp_data = copy.deepcopy(data)
temp_data['scores'] = torch.rand(4)
new_instance_data = instance_data.new(data=temp_data)
for k, v in new_instance_data.items():
if k in instance_data:
_equal(v, instance_data[k])
else:
assert k == 'scores'
assert _equal(v, temp_data[k])
instance_data = instance_data.new()
# test __setattr__
    # '_meta_info_field' and '_data_fields' are immutable.
    with pytest.raises(AttributeError):
        instance_data._data_fields = dict()
    with pytest.raises(AttributeError):
        instance_data._meta_info_fields = dict()
    # all attributes in the data field should be
    # (torch.Tensor, np.ndarray, list)
with pytest.raises(AssertionError):
instance_data.a = 1000
    # all data fields should have the same length
new_instance_data = instance_data.new()
new_instance_data.det_bbox = torch.rand(100, 4)
new_instance_data.det_label = torch.arange(100)
with pytest.raises(AssertionError):
new_instance_data.scores = torch.rand(101, 1)
new_instance_data.none = [None] * 100
with pytest.raises(AssertionError):
new_instance_data.scores = [None] * 101
new_instance_data.numpy_det = np.random.random([100, 1])
with pytest.raises(AssertionError):
new_instance_data.scores = np.random.random([101, 1])
    # index should be a str, slice, int, torch.LongTensor or torch.BoolTensor
item = torch.Tensor([1, 2, 3, 4])
with pytest.raises(AssertionError):
new_instance_data[item]
    assert len(new_instance_data[item.long()]) == 4
    # when the index is a bool tensor, its length
    # must equal the number of instances in the
    # data fields
with pytest.raises(AssertionError):
new_instance_data[item.bool()]
for i in range(len(new_instance_data)):
assert new_instance_data[i].det_label == i
assert len(new_instance_data[i]) == 1
    # the index should be within [0, len(instance_data) - 1]
with pytest.raises(IndexError):
new_instance_data[101]
    # indexing an empty instance_data is not allowed
new_new_instance_data = new_instance_data.new()
with pytest.raises(AssertionError):
new_new_instance_data[0]
    # setting an attribute of unsupported type (tuple) should raise
    with pytest.raises(AssertionError):
        instance_data.img_size_dummy = meta_info['img_size']
# test slice
    ten_results = new_instance_data[:10]
    assert len(ten_results) == 10
    for v in ten_results.values():
assert len(v) == 10
    # test LongTensor indexing
long_tensor = torch.randint(100, (50, ))
long_index_instance_data = new_instance_data[long_tensor]
assert len(long_index_instance_data) == len(long_tensor)
for key, value in long_index_instance_data.items():
if not isinstance(value, list):
assert (long_index_instance_data[key] == new_instance_data[key]
[long_tensor]).all()
else:
            assert len(value) == len(long_tensor)
# test bool tensor
bool_tensor = torch.rand(100) > 0.5
bool_index_instance_data = new_instance_data[bool_tensor]
assert len(bool_index_instance_data) == bool_tensor.sum()
for key, value in bool_index_instance_data.items():
if not isinstance(value, list):
assert (bool_index_instance_data[key] == new_instance_data[key]
[bool_tensor]).all()
else:
assert len(value) == bool_tensor.sum()
num_instance = 1000
instance_data_list = []
# assert len(instance_lists) > 0
with pytest.raises(AssertionError):
instance_data.cat(instance_data_list)
for _ in range(2):
instance_data['bbox'] = torch.rand(num_instance, 4)
instance_data['label'] = torch.rand(num_instance, 1)
instance_data['mask'] = torch.rand(num_instance, 224, 224)
instance_data['instances_infos'] = [1] * num_instance
instance_data['cpu_bbox'] = np.random.random((num_instance, 4))
if torch.cuda.is_available():
instance_data.cuda_tensor = torch.rand(num_instance).cuda()
assert instance_data.cuda_tensor.is_cuda
cuda_instance_data = instance_data.cuda()
assert cuda_instance_data.cuda_tensor.is_cuda
assert len(instance_data[0]) == 1
with pytest.raises(IndexError):
            instance_data[num_instance + 1]
with pytest.raises(AssertionError):
instance_data.centerness = torch.rand(num_instance + 1, 1)
mask_tensor = torch.rand(num_instance) > 0.5
length = mask_tensor.sum()
assert len(instance_data[mask_tensor]) == length
index_tensor = torch.LongTensor([1, 5, 8, 110, 399])
length = len(index_tensor)
assert len(instance_data[index_tensor]) == length
instance_data_list.append(instance_data)
    cat_results = InstanceData.cat(instance_data_list)
    assert len(cat_results) == num_instance * 2
instances = InstanceData(data=dict(bboxes=torch.rand(4, 4)))
# cat only single instance
assert len(InstanceData.cat([instances])) == 4
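# Hedged note: as checked above, InstanceData.cat concatenates every
# per-instance field along the first dimension, so two blocks of
# `num_instance` instances yield `2 * num_instance`, while a single block is
# returned with its length unchanged.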
| 21,205 | 34.820946 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_utils/test_nms.py
|
import pytest
import torch
from mmdet.core.post_processing import mask_matrix_nms
def _create_mask(N, h, w):
masks = torch.rand((N, h, w)) > 0.5
labels = torch.rand(N)
scores = torch.rand(N)
return masks, labels, scores
def test_nms_input_errors():
with pytest.raises(AssertionError):
mask_matrix_nms(
torch.rand((10, 28, 28)), torch.rand(11), torch.rand(11))
with pytest.raises(AssertionError):
masks = torch.rand((10, 28, 28))
mask_matrix_nms(
masks,
torch.rand(11),
torch.rand(11),
mask_area=masks.sum((1, 2)).float()[:8])
with pytest.raises(NotImplementedError):
mask_matrix_nms(
torch.rand((10, 28, 28)),
torch.rand(10),
torch.rand(10),
kernel='None')
    # test empty results
masks, labels, scores = _create_mask(0, 28, 28)
score, label, mask, keep_ind = \
mask_matrix_nms(masks, labels, scores)
assert len(score) == len(label) == \
len(mask) == len(keep_ind) == 0
    # without filter_thr, nms_pre and max_num
masks, labels, scores = _create_mask(1000, 28, 28)
score, label, mask, keep_ind = \
mask_matrix_nms(masks, labels, scores)
assert len(score) == len(label) == \
len(mask) == len(keep_ind) == 1000
# only use nms_pre
score, label, mask, keep_ind = \
mask_matrix_nms(masks, labels, scores, nms_pre=500)
assert len(score) == len(label) == \
len(mask) == len(keep_ind) == 500
# use max_num
score, label, mask, keep_ind = \
mask_matrix_nms(masks, labels, scores,
nms_pre=500, max_num=100)
assert len(score) == len(label) == \
len(mask) == len(keep_ind) == 100
masks, labels, _ = _create_mask(1, 28, 28)
scores = torch.Tensor([1.0])
masks = masks.expand(1000, 28, 28)
labels = labels.expand(1000)
scores = scores.expand(1000)
    # check that scores are decayed and filter_thr works:
    # with identical masks and labels and all scores equal to 1,
    # the first score stays 1 while the others decay and are filtered out.
score, label, mask, keep_ind = \
mask_matrix_nms(masks,
labels,
scores,
nms_pre=500,
max_num=100,
kernel='gaussian',
sigma=2.0,
filter_thr=0.5)
assert len(score) == 1
assert score[0] == 1
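# Hedged intuition for the final check, assuming the gaussian matrix-NMS
# decay of SOLOv2: each duplicate has IoU 1 with the kept top-scoring mask,
# so its score is multiplied by roughly exp(-sigma * 1 ** 2) ~= 0.135 for
# sigma=2.0, which falls below filter_thr=0.5 and is removed, leaving the
# single surviving score of 1.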
| 2,528 | 32.276316 | 69 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_utils/test_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.core.bbox.coder import (DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
TBLRBBoxCoder, YOLOBBoxCoder)
def test_yolo_bbox_coder():
coder = YOLOBBoxCoder()
bboxes = torch.Tensor([[-42., -29., 74., 61.], [-10., -29., 106., 61.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
pred_bboxes = torch.Tensor([[0.4709, 0.6152, 0.1690, -0.4056],
[0.5399, 0.6653, 0.1162, -0.4162],
[0.4654, 0.6618, 0.1548, -0.4301],
[0.4786, 0.6197, 0.1896, -0.4479]])
grid_size = 32
expected_decode_bboxes = torch.Tensor(
[[-53.6102, -10.3096, 83.7478, 49.6824],
[-15.8700, -8.3901, 114.4236, 50.9693],
[11.1822, -8.0924, 146.6034, 50.4476],
[41.2068, -8.9232, 181.4236, 48.5840]])
assert expected_decode_bboxes.allclose(
coder.decode(bboxes, pred_bboxes, grid_size))
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
# test add_ctr_clamp
coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)
rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
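# Hedged worked example for the second row of the first coder above
# (assuming zero means and unit stds): roi (0, 0, 1, 1) has cx = cy = 0.5
# and w = h = 1; deltas (1, 1, 1, 1) give cx' = 0.5 + 1 * 1 = 1.5 and
# w' = 1 * exp(1) ~= 2.7183, so x1 = 1.5 - w' / 2 ~= 0.1409 and
# x2 = 1.5 + w' / 2 ~= 2.8591, matching the expected box.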
def test_tblr_bbox_coder():
coder = TBLRBBoxCoder(normalizer=15.)
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.5000, 0.5000, 0.5000, 0.5000],
[0.0000, 0.0000, 12.0000, 13.0000],
[0.0000, 0.5000, 0.0000, 0.5000],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(13, 12))
assert expected_decode_bboxes.allclose(out)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((13, 12)))
assert expected_decode_bboxes.allclose(out)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(13, 12))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(13, 12), (13, 12)])[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
coder.decode(batch_rois, batch_deltas, max_shape=[(13, 12)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
def test_distance_point_bbox_coder():
coder = DistancePointBBoxCoder()
points = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], [29., 170.]])
gt_bboxes = torch.Tensor([[74., 61., 75., 62.], [0., 104., 0., 112.],
[100., 90., 100., 120.], [0., 120., 100., 120.]])
expected_distance = torch.Tensor([[0., 0., 1., 1.], [0., 2., 29., 6.],
[38., 0., 0., 50.], [29., 50., 50., 0.]])
out_distance = coder.encode(points, gt_bboxes, max_dis=50, eps=0)
assert expected_distance.allclose(out_distance)
distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
out_bbox = coder.decode(points, distance, max_shape=(120, 100))
assert gt_bboxes.allclose(out_bbox)
| 5,779 | 44.15625 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_utils/test_misc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import numpy as np
import pytest
import torch
from mmdet.core.bbox import distance2bbox
from mmdet.core.mask.structures import BitmapMasks, PolygonMasks
from mmdet.core.utils import (center_of_mass, filter_scores_and_topk,
flip_tensor, mask2ndarray, select_single_mlvl)
from mmdet.utils import find_latest_checkpoint
def dummy_raw_polygon_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (N, H, W)
    Returns:
list[list[ndarray]]: dummy mask
"""
num_obj, height, width = size
polygons = []
for _ in range(num_obj):
num_points = np.random.randint(5) * 2 + 6
polygons.append([np.random.uniform(0, min(height, width), num_points)])
return polygons
def test_mask2ndarray():
raw_masks = np.ones((3, 28, 28))
bitmap_mask = BitmapMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(bitmap_mask)
assert np.allclose(raw_masks, output_mask)
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(polygon_masks)
assert output_mask.shape == (3, 28, 28)
raw_masks = np.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
raw_masks = torch.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
# test unsupported type
raw_masks = []
with pytest.raises(TypeError):
output_mask = mask2ndarray(raw_masks)
def test_distance2bbox():
point = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], [29., 170.]])
distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
expected_decode_bboxes = torch.Tensor([[74., 61., 75., 62.],
[0., 104., 0., 112.],
[100., 90., 100., 120.],
[0., 120., 100., 120.]])
out_bbox = distance2bbox(point, distance, max_shape=(120, 100))
assert expected_decode_bboxes.allclose(out_bbox)
out = distance2bbox(point, distance, max_shape=torch.Tensor((120, 100)))
assert expected_decode_bboxes.allclose(out)
batch_point = point.unsqueeze(0).repeat(2, 1, 1)
batch_distance = distance.unsqueeze(0).repeat(2, 1, 1)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=[(120, 100), (120, 100)])[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
distance2bbox(
batch_point,
batch_distance,
max_shape=[(120, 100), (120, 100), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
rois = torch.zeros((2, 0, 4))
deltas = torch.zeros((2, 0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
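# Hedged note on the decode rule distance2bbox implements: a point (x, y)
# with distances (left, top, right, bottom) becomes
# (x - left, y - top, x + right, y + bottom), clamped to max_shape; e.g. the
# second point (-29, 106) with (1, 2, 10, 6) decodes to (-30, 104, -19, 112)
# and is clamped to (0, 104, 0, 112) as expected above.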
@pytest.mark.parametrize('mask', [
torch.ones((28, 28)),
torch.zeros((28, 28)),
torch.rand(28, 28) > 0.5,
torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]])
])
def test_center_of_mass(mask):
center_h, center_w = center_of_mass(mask)
if mask.shape[0] == 4:
assert center_h == 1.5
assert center_w == 1.5
assert isinstance(center_h, torch.Tensor) \
and isinstance(center_w, torch.Tensor)
assert 0 <= center_h <= 28 \
and 0 <= center_w <= 28
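# A minimal sketch (an assumed equivalent for binary masks, not necessarily
# the mmdet implementation): the center of mass is the mean coordinate of
# the foreground pixels, which yields (1.5, 1.5) for the 4x4 case above.
def _center_of_mass_sketch(mask):
    ys, xs = torch.nonzero(mask, as_tuple=True)
    return ys.float().mean(), xs.float().mean()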
def test_flip_tensor():
img = np.random.random((1, 3, 10, 10))
src_tensor = torch.from_numpy(img)
# test flip_direction parameter error
with pytest.raises(AssertionError):
flip_tensor(src_tensor, 'flip')
# test tensor dimension
with pytest.raises(AssertionError):
flip_tensor(src_tensor[0], 'vertical')
    hflip_tensor = flip_tensor(src_tensor, 'horizontal')
    expected_hflip_tensor = torch.from_numpy(img[..., ::-1].copy())
    assert expected_hflip_tensor.allclose(hflip_tensor)
    vflip_tensor = flip_tensor(src_tensor, 'vertical')
    expected_vflip_tensor = torch.from_numpy(img[..., ::-1, :].copy())
    assert expected_vflip_tensor.allclose(vflip_tensor)
    diag_flip_tensor = flip_tensor(src_tensor, 'diagonal')
    expected_diag_flip_tensor = torch.from_numpy(img[..., ::-1, ::-1].copy())
    assert expected_diag_flip_tensor.allclose(diag_flip_tensor)
def test_select_single_mlvl():
mlvl_tensors = [torch.rand(2, 1, 10, 10)] * 5
mlvl_tensor_list = select_single_mlvl(mlvl_tensors, 1)
assert len(mlvl_tensor_list) == 5 and mlvl_tensor_list[0].ndim == 3
def test_filter_scores_and_topk():
score = torch.tensor([[0.1, 0.3, 0.2], [0.12, 0.7, 0.9], [0.02, 0.8, 0.08],
[0.4, 0.1, 0.08]])
bbox_pred = torch.tensor([[0.2, 0.3], [0.4, 0.7], [0.1, 0.1], [0.5, 0.1]])
score_thr = 0.15
nms_pre = 4
# test results type error
with pytest.raises(NotImplementedError):
filter_scores_and_topk(score, score_thr, nms_pre, (score, ))
filtered_results = filter_scores_and_topk(
score, score_thr, nms_pre, results=dict(bbox_pred=bbox_pred))
filtered_score, labels, keep_idxs, results = filtered_results
assert filtered_score.allclose(torch.tensor([0.9, 0.8, 0.7, 0.4]))
assert labels.allclose(torch.tensor([2, 1, 1, 0]))
assert keep_idxs.allclose(torch.tensor([1, 2, 1, 3]))
assert results['bbox_pred'].allclose(
torch.tensor([[0.4, 0.7], [0.1, 0.1], [0.4, 0.7], [0.5, 0.1]]))
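# Hedged explanation of the expectations above: the (num_points, num_classes)
# scores are flattened, entries below score_thr are dropped, and the top
# nms_pre survivors are kept in descending score order; `labels` holds their
# class indices and `keep_idxs` their point indices, which is why point 1
# contributes twice (score 0.9 for class 2 and 0.7 for class 1).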
def test_find_latest_checkpoint():
with tempfile.TemporaryDirectory() as tmpdir:
path = tmpdir
latest = find_latest_checkpoint(path)
# There are no checkpoints in the path.
assert latest is None
path = tmpdir + '/none'
latest = find_latest_checkpoint(path)
# The path does not exist.
assert latest is None
with tempfile.TemporaryDirectory() as tmpdir:
with open(tmpdir + '/latest.pth', 'w') as f:
f.write('latest')
path = tmpdir
latest = find_latest_checkpoint(path)
assert latest == tmpdir + '/latest.pth'
with tempfile.TemporaryDirectory() as tmpdir:
with open(tmpdir + '/iter_4000.pth', 'w') as f:
f.write('iter_4000')
with open(tmpdir + '/iter_8000.pth', 'w') as f:
f.write('iter_8000')
path = tmpdir
latest = find_latest_checkpoint(path)
assert latest == tmpdir + '/iter_8000.pth'
with tempfile.TemporaryDirectory() as tmpdir:
with open(tmpdir + '/epoch_1.pth', 'w') as f:
f.write('epoch_1')
with open(tmpdir + '/epoch_2.pth', 'w') as f:
f.write('epoch_2')
path = tmpdir
latest = find_latest_checkpoint(path)
assert latest == tmpdir + '/epoch_2.pth'
| 7,267 | 34.627451 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_utils/test_version.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet import digit_version
def test_version_check():
assert digit_version('1.0.5') > digit_version('1.0.5rc0')
assert digit_version('1.0.5') > digit_version('1.0.4rc0')
assert digit_version('1.0.5') > digit_version('1.0rc0')
assert digit_version('1.0.0') > digit_version('0.6.2')
assert digit_version('1.0.0') > digit_version('0.2.16')
assert digit_version('1.0.5rc0') > digit_version('1.0.0rc0')
assert digit_version('1.0.0rc1') > digit_version('1.0.0rc0')
assert digit_version('1.0.0rc2') > digit_version('1.0.0rc0')
assert digit_version('1.0.0rc2') > digit_version('1.0.0rc1')
assert digit_version('1.0.1rc1') > digit_version('1.0.0rc1')
assert digit_version('1.0.0') > digit_version('1.0.0rc1')
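# A minimal sketch (an assumption, not necessarily mmdet's exact
# implementation) of an rc-aware parser consistent with the ordering above:
# encoding 'XrcY' as (X - 1, Y) makes every rc pre-release of X sort below
# the X release itself.
def _digit_version_sketch(version_str):
    digits = []
    for part in version_str.split('.'):
        if part.isdigit():
            digits.append(int(part))
        elif 'rc' in part:
            main, rc = part.split('rc')
            digits.extend([int(main) - 1, int(rc)])
    return digits
assert _digit_version_sketch('1.0.5') > _digit_version_sketch('1.0.5rc0')
assert _digit_version_sketch('1.0.0rc1') > _digit_version_sketch('1.0.0rc0')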
| 798 | 46 | 64 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_utils/test_masks.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.core import BitmapMasks, PolygonMasks
def dummy_raw_bitmap_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (H, W) or (N, H, W)
    Returns:
ndarray: dummy mask
"""
return np.random.randint(0, 2, size, dtype=np.uint8)
def dummy_raw_polygon_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (N, H, W)
    Returns:
list[list[ndarray]]: dummy mask
"""
num_obj, height, width = size
polygons = []
for _ in range(num_obj):
num_points = np.random.randint(5) * 2 + 6
polygons.append([np.random.uniform(0, min(height, width), num_points)])
return polygons
def dummy_bboxes(num, max_height, max_width):
x1y1 = np.random.randint(0, min(max_height // 2, max_width // 2), (num, 2))
wh = np.random.randint(0, min(max_height // 2, max_width // 2), (num, 2))
x2y2 = x1y1 + wh
return np.concatenate([x1y1, x2y2], axis=1).squeeze().astype(np.float32)
def test_bitmap_mask_init():
# init with empty ndarray masks
raw_masks = np.empty((0, 28, 28), dtype=np.uint8)
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 0
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with empty list masks
raw_masks = []
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 0
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with ndarray masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 3
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with list masks contain 3 instances
raw_masks = [dummy_raw_bitmap_masks((28, 28)) for _ in range(3)]
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 3
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with raw masks of unsupported type
with pytest.raises(AssertionError):
raw_masks = [[dummy_raw_bitmap_masks((28, 28))]]
BitmapMasks(raw_masks, 28, 28)
def test_bitmap_mask_rescale():
# rescale with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
rescaled_masks = bitmap_masks.rescale((56, 72))
assert len(rescaled_masks) == 0
assert rescaled_masks.height == 56
assert rescaled_masks.width == 56
# rescale with bitmap masks contain 1 instances
raw_masks = np.array([[[1, 0, 0, 0], [0, 1, 0, 1]]])
bitmap_masks = BitmapMasks(raw_masks, 2, 4)
rescaled_masks = bitmap_masks.rescale((8, 8))
assert len(rescaled_masks) == 1
assert rescaled_masks.height == 4
assert rescaled_masks.width == 8
truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0, 1, 1]]])
assert (rescaled_masks.masks == truth).all()
def test_bitmap_mask_resize():
# resize with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
resized_masks = bitmap_masks.resize((56, 72))
assert len(resized_masks) == 0
assert resized_masks.height == 56
assert resized_masks.width == 72
# resize with bitmap masks contain 1 instances
raw_masks = np.diag(np.ones(4, dtype=np.uint8))[np.newaxis, ...]
bitmap_masks = BitmapMasks(raw_masks, 4, 4)
resized_masks = bitmap_masks.resize((8, 8))
assert len(resized_masks) == 1
assert resized_masks.height == 8
assert resized_masks.width == 8
truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1]]])
assert (resized_masks.masks == truth).all()
# resize to non-square
raw_masks = np.diag(np.ones(4, dtype=np.uint8))[np.newaxis, ...]
bitmap_masks = BitmapMasks(raw_masks, 4, 4)
resized_masks = bitmap_masks.resize((4, 8))
assert len(resized_masks) == 1
assert resized_masks.height == 4
assert resized_masks.width == 8
truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1]]])
assert (resized_masks.masks == truth).all()
def test_bitmap_mask_get_bboxes():
# resize with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
bboxes = bitmap_masks.get_bboxes()
assert len(bboxes) == 0
# resize with bitmap masks contain 1 instances
raw_masks = np.array([[[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0,
0]]])
bitmap_masks = BitmapMasks(raw_masks, 8, 8)
bboxes = bitmap_masks.get_bboxes()
assert len(bboxes) == 1
truth = np.array([[1, 1, 6, 6]])
assert (bboxes == truth).all()
# resize to non-square
raw_masks = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0,
0]]])
bitmap_masks = BitmapMasks(raw_masks, 4, 8)
bboxes = bitmap_masks.get_bboxes()
truth = np.array([[0, 0, 6, 3]])
assert (bboxes == truth).all()
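# Hedged note: get_bboxes() is expected to return the tight (x1, y1, x2, y2)
# box around the non-zero pixels with exclusive right/bottom edges, hence
# [1, 1, 6, 6] for foreground spanning rows/cols 1..5 in the first case.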
def test_bitmap_mask_flip():
# flip with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='horizontal')
assert len(flipped_masks) == 0
assert flipped_masks.height == 28
assert flipped_masks.width == 28
# horizontally flip with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='horizontal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='horizontal')
assert flipped_masks.masks.shape == (3, 28, 28)
assert (bitmap_masks.masks == flipped_flipped_masks.masks).all()
assert (flipped_masks.masks == raw_masks[:, :, ::-1]).all()
# vertically flip with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='vertical')
flipped_flipped_masks = flipped_masks.flip(flip_direction='vertical')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert (bitmap_masks.masks == flipped_flipped_masks.masks).all()
assert (flipped_masks.masks == raw_masks[:, ::-1, :]).all()
# diagonal flip with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='diagonal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='diagonal')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert (bitmap_masks.masks == flipped_flipped_masks.masks).all()
assert (flipped_masks.masks == raw_masks[:, ::-1, ::-1]).all()
def test_bitmap_mask_pad():
# pad with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
padded_masks = bitmap_masks.pad((56, 56))
assert len(padded_masks) == 0
assert padded_masks.height == 56
assert padded_masks.width == 56
# pad with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
padded_masks = bitmap_masks.pad((56, 56))
assert len(padded_masks) == 3
assert padded_masks.height == 56
assert padded_masks.width == 56
assert (padded_masks.masks[:, 28:, 28:] == 0).all()
def test_bitmap_mask_crop():
# crop with empty bitmap masks
    dummy_bbox = np.array([0, 10, 10, 27], dtype=np.int64)
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_masks = bitmap_masks.crop(dummy_bbox)
assert len(cropped_masks) == 0
assert cropped_masks.height == 17
assert cropped_masks.width == 10
# crop with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_masks = bitmap_masks.crop(dummy_bbox)
assert len(cropped_masks) == 3
assert cropped_masks.height == 17
assert cropped_masks.width == 10
x1, y1, x2, y2 = dummy_bbox
assert (cropped_masks.masks == raw_masks[:, y1:y2, x1:x2]).all()
# crop with invalid bbox
with pytest.raises(AssertionError):
dummy_bbox = dummy_bboxes(2, 28, 28)
bitmap_masks.crop(dummy_bbox)
def test_bitmap_mask_crop_and_resize():
dummy_bbox = dummy_bboxes(5, 28, 28)
inds = np.random.randint(0, 3, (5, ))
# crop and resize with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_resized_masks = bitmap_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 0
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
# crop and resize with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_resized_masks = bitmap_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 5
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
def test_bitmap_mask_expand():
# expand with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
expanded_masks = bitmap_masks.expand(56, 56, 12, 14)
assert len(expanded_masks) == 0
assert expanded_masks.height == 56
assert expanded_masks.width == 56
# expand with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
expanded_masks = bitmap_masks.expand(56, 56, 12, 14)
assert len(expanded_masks) == 3
assert expanded_masks.height == 56
assert expanded_masks.width == 56
assert (expanded_masks.masks[:, :12, :14] == 0).all()
assert (expanded_masks.masks[:, 12 + 28:, 14 + 28:] == 0).all()
def test_bitmap_mask_area():
# area of empty bitmap mask
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert bitmap_masks.areas.sum() == 0
# area of bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
areas = bitmap_masks.areas
assert len(areas) == 3
assert (areas == raw_masks.sum((1, 2))).all()
def test_bitmap_mask_to_ndarray():
# empty bitmap masks to ndarray
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
ndarray_masks = bitmap_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (0, 28, 28)
# bitmap masks contain 3 instances to ndarray
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
ndarray_masks = bitmap_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (3, 28, 28)
assert (ndarray_masks == raw_masks).all()
def test_bitmap_mask_to_tensor():
# empty bitmap masks to tensor
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
tensor_masks = bitmap_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (0, 28, 28)
# bitmap masks contain 3 instances to tensor
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
tensor_masks = bitmap_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (3, 28, 28)
assert (tensor_masks.numpy() == raw_masks).all()
def test_bitmap_mask_index():
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert (bitmap_masks[0].masks == raw_masks[0]).all()
assert (bitmap_masks[range(2)].masks == raw_masks[range(2)]).all()
def test_bitmap_mask_iter():
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
for i, bitmap_mask in enumerate(bitmap_masks):
assert bitmap_mask.shape == (28, 28)
assert (bitmap_mask == raw_masks[i]).all()
def test_polygon_mask_init():
# init with empty masks
raw_masks = []
    polygon_masks = PolygonMasks(raw_masks, 28, 28)
assert len(polygon_masks) == 0
assert polygon_masks.height == 28
assert polygon_masks.width == 28
# init with masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
assert isinstance(polygon_masks.masks, list)
assert isinstance(polygon_masks.masks[0], list)
assert isinstance(polygon_masks.masks[0][0], np.ndarray)
assert len(polygon_masks) == 3
assert polygon_masks.height == 28
assert polygon_masks.width == 28
assert polygon_masks.to_ndarray().shape == (3, 28, 28)
# init with raw masks of unsupported type
with pytest.raises(AssertionError):
raw_masks = [[[]]]
PolygonMasks(raw_masks, 28, 28)
    with pytest.raises(AssertionError):
        raw_masks = [dummy_raw_polygon_masks((3, 28, 28))]
        PolygonMasks(raw_masks, 28, 28)
def test_polygon_mask_rescale():
# rescale with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
rescaled_masks = polygon_masks.rescale((56, 72))
assert len(rescaled_masks) == 0
assert rescaled_masks.height == 56
assert rescaled_masks.width == 56
assert rescaled_masks.to_ndarray().shape == (0, 56, 56)
# rescale with polygon masks contain 3 instances
    raw_masks = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=np.float64)]]
polygon_masks = PolygonMasks(raw_masks, 5, 5)
rescaled_masks = polygon_masks.rescale((12, 10))
assert len(rescaled_masks) == 1
assert rescaled_masks.height == 10
assert rescaled_masks.width == 10
assert rescaled_masks.to_ndarray().shape == (1, 10, 10)
truth = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
np.uint8)
assert (rescaled_masks.to_ndarray() == truth).all()
def test_polygon_mask_resize():
# resize with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
resized_masks = polygon_masks.resize((56, 72))
assert len(resized_masks) == 0
assert resized_masks.height == 56
assert resized_masks.width == 72
assert resized_masks.to_ndarray().shape == (0, 56, 72)
assert len(resized_masks.get_bboxes()) == 0
# resize with polygon masks contain 1 instance 1 part
    raw_masks1 = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=np.float64)]]
polygon_masks1 = PolygonMasks(raw_masks1, 5, 5)
resized_masks1 = polygon_masks1.resize((10, 10))
assert len(resized_masks1) == 1
assert resized_masks1.height == 10
assert resized_masks1.width == 10
assert resized_masks1.to_ndarray().shape == (1, 10, 10)
truth1 = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
np.uint8)
assert (resized_masks1.to_ndarray() == truth1).all()
bboxes = resized_masks1.get_bboxes()
bbox_truth = np.array([[2, 2, 8, 8]])
assert (bboxes == bbox_truth).all()
# resize with polygon masks contain 1 instance 2 part
raw_masks2 = [[
np.array([0., 0., 1., 0., 1., 1.]),
np.array([1., 1., 2., 1., 2., 2., 1., 2.])
]]
polygon_masks2 = PolygonMasks(raw_masks2, 3, 3)
resized_masks2 = polygon_masks2.resize((6, 6))
assert len(resized_masks2) == 1
assert resized_masks2.height == 6
assert resized_masks2.width == 6
assert resized_masks2.to_ndarray().shape == (1, 6, 6)
truth2 = np.array(
[[0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], np.uint8)
assert (resized_masks2.to_ndarray() == truth2).all()
# resize with polygon masks contain 2 instances
raw_masks3 = [raw_masks1[0], raw_masks2[0]]
polygon_masks3 = PolygonMasks(raw_masks3, 5, 5)
resized_masks3 = polygon_masks3.resize((10, 10))
assert len(resized_masks3) == 2
assert resized_masks3.height == 10
assert resized_masks3.width == 10
assert resized_masks3.to_ndarray().shape == (2, 10, 10)
truth3 = np.stack([truth1, np.pad(truth2, ((0, 4), (0, 4)), 'constant')])
assert (resized_masks3.to_ndarray() == truth3).all()
# resize to non-square
    raw_masks4 = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=np.float64)]]
polygon_masks4 = PolygonMasks(raw_masks4, 5, 5)
resized_masks4 = polygon_masks4.resize((5, 10))
assert len(resized_masks4) == 1
assert resized_masks4.height == 5
assert resized_masks4.width == 10
assert resized_masks4.to_ndarray().shape == (1, 5, 10)
truth4 = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], np.uint8)
assert (resized_masks4.to_ndarray() == truth4).all()
def test_polygon_mask_flip():
# flip with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='horizontal')
assert len(flipped_masks) == 0
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (0, 28, 28)
    # TODO: fix flip correctness checking after v2.0_coord is merged
# horizontally flip with polygon masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='horizontal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='horizontal')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (3, 28, 28)
assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray()
).all()
# vertically flip with polygon masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='vertical')
flipped_flipped_masks = flipped_masks.flip(flip_direction='vertical')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (3, 28, 28)
assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray()
).all()
# diagonal flip with polygon masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='diagonal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='diagonal')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (3, 28, 28)
assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray()
).all()
def test_polygon_mask_crop():
    dummy_bbox = np.array([0, 10, 10, 27], dtype=np.int64)
# crop with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
cropped_masks = polygon_masks.crop(dummy_bbox)
assert len(cropped_masks) == 0
assert cropped_masks.height == 17
assert cropped_masks.width == 10
assert cropped_masks.to_ndarray().shape == (0, 17, 10)
# crop with polygon masks contain 1 instances
raw_masks = [[np.array([1., 3., 5., 1., 5., 6., 1, 6])]]
polygon_masks = PolygonMasks(raw_masks, 7, 7)
bbox = np.array([0, 0, 3, 4])
cropped_masks = polygon_masks.crop(bbox)
assert len(cropped_masks) == 1
assert cropped_masks.height == 4
assert cropped_masks.width == 3
assert cropped_masks.to_ndarray().shape == (1, 4, 3)
truth = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 1, 1]])
assert (cropped_masks.to_ndarray() == truth).all()
# crop with invalid bbox
with pytest.raises(AssertionError):
dummy_bbox = dummy_bboxes(2, 28, 28)
polygon_masks.crop(dummy_bbox)
def test_polygon_mask_pad():
# pad with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
padded_masks = polygon_masks.pad((56, 56))
assert len(padded_masks) == 0
assert padded_masks.height == 56
assert padded_masks.width == 56
assert padded_masks.to_ndarray().shape == (0, 56, 56)
# pad with polygon masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
padded_masks = polygon_masks.pad((56, 56))
assert len(padded_masks) == 3
assert padded_masks.height == 56
assert padded_masks.width == 56
assert padded_masks.to_ndarray().shape == (3, 56, 56)
assert (padded_masks.to_ndarray()[:, 28:, 28:] == 0).all()
def test_polygon_mask_expand():
with pytest.raises(NotImplementedError):
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
polygon_masks.expand(56, 56, 10, 17)
def test_polygon_mask_crop_and_resize():
dummy_bbox = dummy_bboxes(5, 28, 28)
inds = np.random.randint(0, 3, (5, ))
# crop and resize with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
cropped_resized_masks = polygon_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 0
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
assert cropped_resized_masks.to_ndarray().shape == (0, 56, 56)
# crop and resize with polygon masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
cropped_resized_masks = polygon_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 5
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
assert cropped_resized_masks.to_ndarray().shape == (5, 56, 56)
def test_polygon_mask_area():
# area of empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
assert polygon_masks.areas.sum() == 0
# area of polygon masks contain 1 instance
    # here we construct a case where the gap between the bitmap area and
    # the polygon area is minor
raw_masks = [[np.array([1, 1, 5, 1, 3, 4])]]
polygon_masks = PolygonMasks(raw_masks, 6, 6)
polygon_area = polygon_masks.areas
bitmap_area = polygon_masks.to_bitmap().areas
assert len(polygon_area) == 1
assert np.isclose(polygon_area, bitmap_area).all()
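# Hedged check via the shoelace formula: the triangle (1, 1), (5, 1), (3, 4)
# above has area 0.5 * |1*(1-4) + 5*(4-1) + 3*(1-1)| = 6, which the
# rasterized bitmap area approximates closely.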
def test_polygon_mask_to_bitmap():
# polygon masks contain 3 instances to bitmap
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
bitmap_masks = polygon_masks.to_bitmap()
assert (polygon_masks.to_ndarray() == bitmap_masks.to_ndarray()).all()
def test_polygon_mask_to_ndarray():
# empty polygon masks to ndarray
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
ndarray_masks = polygon_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (0, 28, 28)
# polygon masks contain 3 instances to ndarray
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
ndarray_masks = polygon_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (3, 28, 28)
def test_polygon_to_tensor():
# empty polygon masks to tensor
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
tensor_masks = polygon_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (0, 28, 28)
# polygon masks contain 3 instances to tensor
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
tensor_masks = polygon_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (3, 28, 28)
assert (tensor_masks.numpy() == polygon_masks.to_ndarray()).all()
def test_polygon_mask_index():
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
# index by integer
polygon_masks[0]
# index by list
polygon_masks[[0, 1]]
# index by ndarray
polygon_masks[np.asarray([0, 1])]
with pytest.raises(ValueError):
# invalid index
polygon_masks[torch.Tensor([1, 2])]
def test_polygon_mask_iter():
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
for i, polygon_mask in enumerate(polygon_masks):
assert np.equal(polygon_mask, raw_masks[i]).all()
| 27,461 | 38.8 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_utils/test_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import shutil
import sys
import tempfile
from unittest.mock import MagicMock, call
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.runner import (CheckpointHook, IterTimerHook, PaviLoggerHook,
build_runner)
from torch.nn.init import constant_
from torch.utils.data import DataLoader, Dataset
from mmdet.core.hook import ExpMomentumEMAHook, YOLOXLrUpdaterHook
from mmdet.core.hook.sync_norm_hook import SyncNormHook
from mmdet.core.hook.sync_random_size_hook import SyncRandomSizeHook
def _build_demo_runner_without_hook(runner_type='EpochBasedRunner',
max_epochs=1,
max_iters=None,
                                    multi_optimizers=False):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
self.conv = nn.Conv2d(3, 3, 3)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
model = Model()
    if multi_optimizers:
optimizer = {
'model1':
torch.optim.SGD(model.linear.parameters(), lr=0.02, momentum=0.95),
'model2':
torch.optim.SGD(model.conv.parameters(), lr=0.01, momentum=0.9),
}
else:
optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95)
tmp_dir = tempfile.mkdtemp()
runner = build_runner(
dict(type=runner_type),
default_args=dict(
model=model,
work_dir=tmp_dir,
optimizer=optimizer,
logger=logging.getLogger(),
max_epochs=max_epochs,
max_iters=max_iters))
return runner
def _build_demo_runner(runner_type='EpochBasedRunner',
max_epochs=1,
max_iters=None,
                       multi_optimizers=False):
log_config = dict(
interval=1, hooks=[
dict(type='TextLoggerHook'),
])
    runner = _build_demo_runner_without_hook(runner_type, max_epochs,
                                             max_iters, multi_optimizers)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_logger_hooks(log_config)
return runner
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_yolox_lrupdater_hook(multi_optimizers):
    """xdoctest -m tests/test_hooks.py test_yolox_lrupdater_hook."""
    # Construct once as a smoke test that instantiation does not error
    YOLOXLrUpdaterHook(0, min_lr_ratio=0.05)
sys.modules['pavi'] = MagicMock()
loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
hook_cfg = dict(
type='YOLOXLrUpdaterHook',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
        warmup_iters=5,  # interpreted as 5 epochs since warmup_by_epoch=True
num_last_epochs=15,
min_lr_ratio=0.05)
runner.register_hook_from_cfg(hook_cfg)
runner.register_hook_from_cfg(dict(type='IterTimerHook'))
runner.register_hook(IterTimerHook())
# add pavi hook
hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
runner.register_hook(hook)
runner.run([loader], [('train', 1)])
shutil.rmtree(runner.work_dir)
# TODO: use a more elegant way to check values
assert hasattr(hook, 'writer')
    if multi_optimizers:
calls = [
call(
'train', {
'learning_rate/model1': 8.000000000000001e-06,
'learning_rate/model2': 4.000000000000001e-06,
'momentum/model1': 0.95,
'momentum/model2': 0.9
}, 1),
call(
'train', {
'learning_rate/model1': 0.00039200000000000004,
'learning_rate/model2': 0.00019600000000000002,
'momentum/model1': 0.95,
'momentum/model2': 0.9
}, 7),
call(
'train', {
'learning_rate/model1': 0.0008000000000000001,
'learning_rate/model2': 0.0004000000000000001,
'momentum/model1': 0.95,
'momentum/model2': 0.9
}, 10)
]
else:
calls = [
call('train', {
'learning_rate': 8.000000000000001e-06,
'momentum': 0.95
}, 1),
call('train', {
'learning_rate': 0.00039200000000000004,
'momentum': 0.95
}, 7),
call('train', {
'learning_rate': 0.0008000000000000001,
'momentum': 0.95
}, 10)
]
hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
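# Hedged note on the expected values above: they are consistent with a
# quadratic warmup, lr = base_lr * (cur_iter / warmup_total_iters) ** 2, with
# base_lr=0.02, warmup_ratio=1 and 5 warmup epochs x 10 iters = 50 warmup
# iters. A minimal sketch reproducing the single-optimizer numbers:
def _yolox_warmup_lr_sketch(base_lr=0.02, warmup_total_iters=50):
    for cur_iter in (1, 7, 10):
        # prints 8e-06, 0.000392 and 0.0008 for iters 1, 7 and 10
        print(cur_iter, base_lr * (cur_iter / warmup_total_iters)**2)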
def test_ema_hook():
"""xdoctest -m tests/test_hooks.py test_ema_hook."""
class DemoModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=1,
padding=1,
bias=True)
self.bn = nn.BatchNorm2d(2)
self._init_weight()
def _init_weight(self):
constant_(self.conv.weight, 0)
constant_(self.conv.bias, 0)
constant_(self.bn.weight, 0)
constant_(self.bn.bias, 0)
def forward(self, x):
return self.bn(self.conv(x)).sum()
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
loader = DataLoader(torch.ones((1, 1, 1, 1)))
runner = _build_demo_runner()
demo_model = DemoModel()
runner.model = demo_model
ema_hook = ExpMomentumEMAHook(
momentum=0.0002,
total_iter=1,
skip_buffers=True,
interval=2,
resume_from=None)
checkpointhook = CheckpointHook(interval=1, by_epoch=True)
runner.register_hook(ema_hook, priority='HIGHEST')
runner.register_hook(checkpointhook)
runner.run([loader, loader], [('train', 1), ('val', 1)])
checkpoint = torch.load(f'{runner.work_dir}/epoch_1.pth')
    num_ema_params = 0
    for name, value in checkpoint['state_dict'].items():
        if 'ema' in name:
            num_ema_params += 1
            value.fill_(1)
    assert num_ema_params == 4
torch.save(checkpoint, f'{runner.work_dir}/epoch_1.pth')
work_dir = runner.work_dir
resume_ema_hook = ExpMomentumEMAHook(
momentum=0.5,
total_iter=10,
skip_buffers=True,
interval=1,
resume_from=f'{work_dir}/epoch_1.pth')
runner = _build_demo_runner(max_epochs=2)
runner.model = demo_model
runner.register_hook(resume_ema_hook, priority='HIGHEST')
checkpointhook = CheckpointHook(interval=1, by_epoch=True)
runner.register_hook(checkpointhook)
runner.run([loader, loader], [('train', 1), ('val', 1)])
checkpoint = torch.load(f'{runner.work_dir}/epoch_2.pth')
    num_ema_params = 0
    desired_output = [0.9094, 0.9094]
    for name, value in checkpoint['state_dict'].items():
        if 'ema' in name:
            num_ema_params += 1
            assert value.sum() == 2
        else:
            if ('weight' in name) or ('bias' in name):
                assert np.allclose(value.data.cpu().numpy().reshape(-1),
                                   desired_output, 1e-4)
    assert num_ema_params == 4
shutil.rmtree(runner.work_dir)
shutil.rmtree(work_dir)
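# Hedged sketch of the update rule suggested by the hook's name (an assumed
# shape, not code copied from the implementation): the effective momentum
# starts near 1 and decays exponentially towards the configured value over
# `total_iter` iterations, after which the EMA copy is blended in place.
import math

def _exp_momentum_ema_sketch(ema_param, param, cur_iter,
                             momentum=0.0002, total_iter=2000):
    m = (1 - momentum) * math.exp(-(1 + cur_iter) / total_iter) + momentum
    ema_param.mul_(1 - m).add_(param, alpha=m)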
def test_sync_norm_hook():
    # Construct once as a smoke test that instantiation does not error
    SyncNormHook()
loader = DataLoader(torch.ones((5, 2)))
runner = _build_demo_runner()
runner.register_hook_from_cfg(dict(type='SyncNormHook'))
runner.run([loader, loader], [('train', 1), ('val', 1)])
shutil.rmtree(runner.work_dir)
def test_sync_random_size_hook():
    # Construct once as a smoke test that instantiation does not error
    SyncRandomSizeHook()
class DemoDataset(Dataset):
def __getitem__(self, item):
return torch.ones(2)
def __len__(self):
return 5
def update_dynamic_scale(self, dynamic_scale):
pass
loader = DataLoader(DemoDataset())
runner = _build_demo_runner()
runner.register_hook_from_cfg(
dict(type='SyncRandomSizeHook', device='cpu'))
runner.run([loader, loader], [('train', 1), ('val', 1)])
shutil.rmtree(runner.work_dir)
if torch.cuda.is_available():
runner = _build_demo_runner()
runner.register_hook_from_cfg(
dict(type='SyncRandomSizeHook', device='cuda'))
runner.run([loader, loader], [('train', 1), ('val', 1)])
shutil.rmtree(runner.work_dir)
@pytest.mark.parametrize('set_loss', [
dict(set_loss_nan=False, set_loss_inf=False),
dict(set_loss_nan=True, set_loss_inf=False),
dict(set_loss_nan=False, set_loss_inf=True)
])
def test_check_invalid_loss_hook(set_loss):
# Check whether loss is valid during training.
class DemoModel(nn.Module):
def __init__(self, set_loss_nan=False, set_loss_inf=False):
super().__init__()
self.set_loss_nan = set_loss_nan
self.set_loss_inf = set_loss_inf
self.linear = nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
if self.set_loss_nan:
return dict(loss=torch.tensor(float('nan')))
elif self.set_loss_inf:
return dict(loss=torch.tensor(float('inf')))
else:
return dict(loss=self(x))
loader = DataLoader(torch.ones((5, 2)))
runner = _build_demo_runner()
demo_model = DemoModel(**set_loss)
runner.model = demo_model
runner.register_hook_from_cfg(
dict(type='CheckInvalidLossHook', interval=1))
if not set_loss['set_loss_nan'] \
and not set_loss['set_loss_inf']:
# check loss is valid
runner.run([loader], [('train', 1)])
else:
# check loss is nan or inf
with pytest.raises(AssertionError):
runner.run([loader], [('train', 1)])
shutil.rmtree(runner.work_dir)
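# Hedged sketch of what a CheckInvalidLossHook has to do (an assumed shape,
# not the real implementation): after every `interval`-th training iteration
# it asserts that the reported loss is finite, which is exactly the behavior
# the parametrized test above exercises.
from mmcv.runner import Hook

class _InvalidLossCheckSketch(Hook):

    def __init__(self, interval=1):
        self.interval = interval

    def after_train_iter(self, runner):
        if self.every_n_iters(runner, self.interval):
            assert torch.isfinite(runner.outputs['loss']), \
                'loss becomes nan or inf'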
def test_set_epoch_info_hook():
"""Test SetEpochInfoHook."""
class DemoModel(nn.Module):
def __init__(self):
super().__init__()
self.epoch = 0
self.linear = nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def set_epoch(self, epoch):
self.epoch = epoch
loader = DataLoader(torch.ones((5, 2)))
runner = _build_demo_runner(max_epochs=3)
demo_model = DemoModel()
runner.model = demo_model
runner.register_hook_from_cfg(dict(type='SetEpochInfoHook'))
runner.run([loader], [('train', 1)])
assert demo_model.epoch == 2
| 11,278 | 30.771831 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_utils/test_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Tests the Assigner objects.
CommandLine:
pytest tests/test_utils/test_assigner.py
xdoctest tests/test_utils/test_assigner.py zero
"""
import pytest
import torch
from mmdet.core.bbox.assigners import (ApproxMaxIoUAssigner,
CenterRegionAssigner, HungarianAssigner,
MaxIoUAssigner, PointAssigner,
TaskAlignedAssigner, UniformAssigner)
def test_max_iou_assigner():
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 4
assert len(assign_result.labels) == 4
expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
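# Hedged worked example for the expected assignment above (indices are
# 1-based; 0 means negative): box [0, 0, 10, 10] overlaps gt 1 with
# IoU 90 / 100 = 0.9 >= pos_iou_thr, while box [5, 5, 15, 15] only reaches
# IoU 25 / 165 ~= 0.15 with gt 2. It is still assigned because, assuming the
# default low-quality matching, each gt also claims its highest-IoU box.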
def test_max_iou_assigner_with_ignore():
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[30, 32, 40, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
assign_result = self.assign(
bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore)
expected_gt_inds = torch.LongTensor([1, 0, 2, -1])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_empty_gt():
"""Test corner case where an image might have no true detections."""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
assign_result = self.assign(bboxes, gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_empty_boxes():
"""Test corner case where a network might predict no boxes."""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
# Test with gt_labels
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 0
assert tuple(assign_result.labels.shape) == (0, )
# Test without gt_labels
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=None)
assert len(assign_result.gt_inds) == 0
assert assign_result.labels is None
def test_max_iou_assigner_with_empty_boxes_and_ignore():
"""Test corner case where a network might predict no boxes and
ignore_iof_thr is on."""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
gt_labels = torch.LongTensor([2, 3])
# Test with gt_labels
assign_result = self.assign(
bboxes,
gt_bboxes,
gt_labels=gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore)
assert len(assign_result.gt_inds) == 0
assert tuple(assign_result.labels.shape) == (0, )
# Test without gt_labels
assign_result = self.assign(
bboxes, gt_bboxes, gt_labels=None, gt_bboxes_ignore=gt_bboxes_ignore)
assert len(assign_result.gt_inds) == 0
assert assign_result.labels is None
def test_max_iou_assigner_with_empty_boxes_and_gt():
"""Test corner case where a network might predict no boxes and no gt."""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
assign_result = self.assign(bboxes, gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_point_assigner():
self = PointAssigner()
points = torch.FloatTensor([ # [x, y, stride]
[0, 0, 1],
[10, 10, 1],
[5, 5, 1],
[32, 32, 1],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
assign_result = self.assign(points, gt_bboxes)
expected_gt_inds = torch.LongTensor([1, 2, 1, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_point_assigner_with_empty_gt():
"""Test corner case where an image might have no true detections."""
self = PointAssigner()
points = torch.FloatTensor([ # [x, y, stride]
[0, 0, 1],
[10, 10, 1],
[5, 5, 1],
[32, 32, 1],
])
gt_bboxes = torch.FloatTensor([])
assign_result = self.assign(points, gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_point_assigner_with_empty_boxes_and_gt():
"""Test corner case where an image might predict no points and no gt."""
self = PointAssigner()
points = torch.FloatTensor([])
gt_bboxes = torch.FloatTensor([])
assign_result = self.assign(points, gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_approx_iou_assigner():
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_approx_iou_assigner_with_empty_gt():
"""Test corner case where an image might have no true detections."""
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([])
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_approx_iou_assigner_with_empty_boxes():
"""Test corner case where an network might predict no boxes."""
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_approx_iou_assigner_with_empty_boxes_and_gt():
"""Test corner case where an network might predict no boxes and no gt."""
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_random_assign_result():
"""Test random instantiation of assign result to catch corner cases."""
from mmdet.core.bbox.assigners.assign_result import AssignResult
AssignResult.random()
AssignResult.random(num_gts=0, num_preds=0)
AssignResult.random(num_gts=0, num_preds=3)
AssignResult.random(num_gts=3, num_preds=3)
AssignResult.random(num_gts=0, num_preds=3)
AssignResult.random(num_gts=7, num_preds=7)
AssignResult.random(num_gts=7, num_preds=64)
AssignResult.random(num_gts=24, num_preds=3)
def test_center_region_assigner():
self = CenterRegionAssigner(pos_scale=0.3, neg_scale=1)
bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [8, 8, 9,
9]])
gt_bboxes = torch.FloatTensor([
[0, 0, 11, 11], # match bboxes[0]
[10, 10, 20, 20], # match bboxes[1]
[4.5, 4.5, 5.5, 5.5], # match bboxes[0] but area is too small
[0, 0, 10, 10], # match bboxes[1] and has a smaller area than gt[0]
])
gt_labels = torch.LongTensor([2, 3, 4, 5])
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 3
assert len(assign_result.labels) == 3
expected_gt_inds = torch.LongTensor([4, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
shadowed_labels = assign_result.get_extra_property('shadowed_labels')
# [8, 8, 9, 9] in the shadowed region of [0, 0, 11, 11] (label: 2)
assert torch.any(shadowed_labels == torch.LongTensor([[2, 2]]))
# [8, 8, 9, 9] in the shadowed region of [0, 0, 10, 10] (label: 5)
assert torch.any(shadowed_labels == torch.LongTensor([[2, 5]]))
# [0, 0, 10, 10] is already assigned to [4.5, 4.5, 5.5, 5.5].
# Therefore, [0, 0, 11, 11] (label: 2) is shadowed
assert torch.any(shadowed_labels == torch.LongTensor([[0, 2]]))
def test_center_region_assigner_with_ignore():
self = CenterRegionAssigner(
pos_scale=0.5,
neg_scale=1,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10], # match bboxes[0]
[10, 10, 20, 20], # match bboxes[1]
])
gt_bboxes_ignore = torch.FloatTensor([
[0, 0, 10, 10], # match bboxes[0]
])
gt_labels = torch.LongTensor([1, 2])
assign_result = self.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 2
assert len(assign_result.labels) == 2
expected_gt_inds = torch.LongTensor([-1, 2])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_center_region_assigner_with_empty_bboxes():
self = CenterRegionAssigner(
pos_scale=0.5,
neg_scale=1,
)
bboxes = torch.empty((0, 4)).float()
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10], # match bboxes[0]
[10, 10, 20, 20], # match bboxes[1]
])
gt_labels = torch.LongTensor([1, 2])
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert assign_result.gt_inds is None or assign_result.gt_inds.numel() == 0
assert assign_result.labels is None or assign_result.labels.numel() == 0
def test_center_region_assigner_with_empty_gts():
self = CenterRegionAssigner(
pos_scale=0.5,
neg_scale=1,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
])
gt_bboxes = torch.empty((0, 4)).float()
gt_labels = torch.empty((0, )).long()
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 2
expected_gt_inds = torch.LongTensor([0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_hungarian_match_assigner():
self = HungarianAssigner()
assert self.iou_cost.iou_mode == 'giou'
# test no gt bboxes
bbox_pred = torch.rand((10, 4))
cls_pred = torch.rand((10, 81))
gt_bboxes = torch.empty((0, 4)).float()
gt_labels = torch.empty((0, )).long()
img_meta = dict(img_shape=(10, 8, 3))
assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
img_meta)
assert torch.all(assign_result.gt_inds == 0)
assert torch.all(assign_result.labels == -1)
# test with gt bboxes
gt_bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])
gt_labels = torch.LongTensor([1, 20])
assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_bboxes.size(0)
assert (assign_result.labels > -1).sum() == gt_bboxes.size(0)
# test iou mode
self = HungarianAssigner(
iou_cost=dict(type='IoUCost', iou_mode='iou', weight=1.0))
assert self.iou_cost.iou_mode == 'iou'
assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_bboxes.size(0)
assert (assign_result.labels > -1).sum() == gt_bboxes.size(0)
# test focal loss mode
self = HungarianAssigner(
iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0),
cls_cost=dict(type='FocalLossCost', weight=1.))
assert self.iou_cost.iou_mode == 'giou'
assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_bboxes.size(0)
assert (assign_result.labels > -1).sum() == gt_bboxes.size(0)
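# Hedged note: HungarianAssigner builds a cost matrix from classification,
# L1 box and IoU terms between every prediction and every gt, then solves a
# one-to-one bipartite matching on it, which is why exactly gt_bboxes.size(0)
# predictions become positive in the asserts above.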
def test_uniform_assigner():
self = UniformAssigner(0.15, 0.7, 1)
pred_bbox = torch.FloatTensor([
[1, 1, 12, 8],
[4, 4, 20, 20],
[1, 5, 15, 15],
[30, 5, 32, 42],
])
anchor = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
assign_result = self.assign(
pred_bbox, anchor, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 4
assert len(assign_result.labels) == 4
expected_gt_inds = torch.LongTensor([-1, 0, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_uniform_assigner_with_empty_gt():
"""Test corner case where an image might have no true detections."""
self = UniformAssigner(0.15, 0.7, 1)
pred_bbox = torch.FloatTensor([
[1, 1, 12, 8],
[4, 4, 20, 20],
[1, 5, 15, 15],
[30, 5, 32, 42],
])
anchor = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
assign_result = self.assign(pred_bbox, anchor, gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_uniform_assigner_with_empty_boxes():
"""Test corner case where a network might predict no boxes."""
self = UniformAssigner(0.15, 0.7, 1)
pred_bbox = torch.empty((0, 4))
anchor = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
# Test with gt_labels
assign_result = self.assign(
pred_bbox, anchor, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 0
assert tuple(assign_result.labels.shape) == (0, )
# Test without gt_labels
assign_result = self.assign(pred_bbox, anchor, gt_bboxes, gt_labels=None)
assert len(assign_result.gt_inds) == 0
def test_task_aligned_assigner():
with pytest.raises(AssertionError):
TaskAlignedAssigner(topk=0)
self = TaskAlignedAssigner(topk=13)
pred_score = torch.FloatTensor([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4],
[0.4, 0.5]])
pred_bbox = torch.FloatTensor([
[1, 1, 12, 8],
[4, 4, 20, 20],
[1, 5, 15, 15],
[30, 5, 32, 42],
])
anchor = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([0, 1])
assign_result = self.assign(
pred_score,
pred_bbox,
anchor,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 4
assert len(assign_result.labels) == 4
# test empty gt
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.empty(0, 2)
assign_result = self.assign(
pred_score, pred_bbox, anchor, gt_bboxes=gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
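# Hedged note: the task-alignment metric behind this assigner is commonly
# written as t = s**alpha * u**beta, where s is the predicted score for the
# gt class and u is the IoU with that gt; the topk candidates per gt are
# kept. A tiny sketch assuming the TOOD defaults alpha=1 and beta=6:
def _alignment_metric_sketch(score, iou, alpha=1, beta=6):
    return score**alpha * iou**beta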
| 17,434 | 31.167897 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_downstream/test_mmtrack.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from collections import defaultdict
import numpy as np
import pytest
import torch
from mmcv import Config
@pytest.mark.parametrize(
'cfg_file',
['./tests/data/configs_mmtrack/selsa_faster_rcnn_r101_dc5_1x.py'])
def test_vid_fgfa_style_forward(cfg_file):
config = Config.fromfile(cfg_file)
model = copy.deepcopy(config.model)
model.pretrains = None
model.detector.pretrained = None
from mmtrack.models import build_model
detector = build_model(model)
# Test forward train with a non-empty truth batch
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
img_metas[0]['is_video_data'] = True
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
gt_masks = mm_inputs['gt_masks']
ref_input_shape = (2, 3, 256, 256)
ref_mm_inputs = _demo_mm_inputs(ref_input_shape, num_items=[9, 11])
ref_img = ref_mm_inputs.pop('imgs')[None]
ref_img_metas = ref_mm_inputs.pop('img_metas')
ref_img_metas[0]['is_video_data'] = True
ref_img_metas[1]['is_video_data'] = True
ref_gt_bboxes = ref_mm_inputs['gt_bboxes']
ref_gt_labels = ref_mm_inputs['gt_labels']
ref_gt_masks = ref_mm_inputs['gt_masks']
losses = detector.forward(
img=imgs,
img_metas=img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
ref_img=ref_img,
ref_img_metas=[ref_img_metas],
ref_gt_bboxes=ref_gt_bboxes,
ref_gt_labels=ref_gt_labels,
gt_masks=gt_masks,
ref_gt_masks=ref_gt_masks,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
img_metas[0]['is_video_data'] = True
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
gt_masks = mm_inputs['gt_masks']
ref_mm_inputs = _demo_mm_inputs(ref_input_shape, num_items=[0, 0])
ref_imgs = ref_mm_inputs.pop('imgs')[None]
ref_img_metas = ref_mm_inputs.pop('img_metas')
ref_img_metas[0]['is_video_data'] = True
ref_img_metas[1]['is_video_data'] = True
ref_gt_bboxes = ref_mm_inputs['gt_bboxes']
ref_gt_labels = ref_mm_inputs['gt_labels']
ref_gt_masks = ref_mm_inputs['gt_masks']
losses = detector.forward(
img=imgs,
img_metas=img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
ref_img=ref_imgs,
ref_img_metas=[ref_img_metas],
ref_gt_bboxes=ref_gt_bboxes,
ref_gt_labels=ref_gt_labels,
gt_masks=gt_masks,
ref_gt_masks=ref_gt_masks,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test forward test with frame_stride=1 and frame_range=[-1,0]
with torch.no_grad():
imgs = torch.cat([imgs, imgs.clone()], dim=0)
img_list = [g[None, :] for g in imgs]
img_metas.extend(copy.deepcopy(img_metas))
for i in range(len(img_metas)):
img_metas[i]['frame_id'] = i
img_metas[i]['num_left_ref_imgs'] = 1
img_metas[i]['frame_stride'] = 1
ref_imgs = [ref_imgs.clone(), imgs[[0]][None].clone()]
ref_img_metas = [
copy.deepcopy(ref_img_metas),
copy.deepcopy([img_metas[0]])
]
results = defaultdict(list)
for one_img, one_meta, ref_img, ref_img_meta in zip(
img_list, img_metas, ref_imgs, ref_img_metas):
result = detector.forward([one_img], [[one_meta]],
ref_img=[ref_img],
ref_img_metas=[[ref_img_meta]],
return_loss=False)
for k, v in result.items():
results[k].append(v)
@pytest.mark.parametrize('cfg_file', [
'./tests/data/configs_mmtrack/tracktor_faster-rcnn_r50_fpn_4e.py',
])
def test_tracktor_forward(cfg_file):
config = Config.fromfile(cfg_file)
model = copy.deepcopy(config.model)
model.pretrains = None
model.detector.pretrained = None
from mmtrack.models import build_model
mot = build_model(model)
mot.eval()
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10], with_track=True)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
with torch.no_grad():
imgs = torch.cat([imgs, imgs.clone()], dim=0)
img_list = [g[None, :] for g in imgs]
img2_metas = copy.deepcopy(img_metas)
img2_metas[0]['frame_id'] = 1
img_metas.extend(img2_metas)
results = defaultdict(list)
for one_img, one_meta in zip(img_list, img_metas):
result = mot.forward([one_img], [[one_meta]], return_loss=False)
for k, v in result.items():
results[k].append(v)
def _demo_mm_inputs(
input_shape=(1, 3, 300, 300),
num_items=None,
num_classes=10,
with_track=False):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_items (None | List[int]):
specifies the number of boxes in each batch item
num_classes (int):
number of different labels a box might have
"""
from mmdet.core import BitmapMasks
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
'frame_id': 0,
'img_norm_cfg': {
'mean': (128.0, 128.0, 128.0),
'std': (10.0, 10.0, 10.0)
}
} for i in range(N)]
gt_bboxes = []
gt_labels = []
gt_masks = []
gt_match_indices = []
for batch_idx in range(N):
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[batch_idx]
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
class_idxs = rng.randint(1, num_classes, size=num_boxes)
gt_bboxes.append(torch.FloatTensor(boxes))
gt_labels.append(torch.LongTensor(class_idxs))
if with_track:
gt_match_indices.append(torch.arange(boxes.shape[0]))
mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
gt_masks.append(BitmapMasks(mask, H, W))
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_bboxes': gt_bboxes,
'gt_labels': gt_labels,
'gt_bboxes_ignore': None,
'gt_masks': gt_masks,
}
if with_track:
mm_inputs['gt_match_indices'] = gt_match_indices
return mm_inputs
| 7,592 | 31.87013 | 77 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_metrics/test_recall.py
|
import numpy as np
from mmdet.core.evaluation.recall import eval_recalls
det_bboxes = np.array([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])
def test_eval_recalls():
gts = [gt_bboxes, gt_bboxes, gt_bboxes]
proposals = [det_bboxes, det_bboxes, det_bboxes]
recall = eval_recalls(
gts, proposals, proposal_nums=2, use_legacy_coordinate=True)
assert recall.shape == (1, 1)
assert 0.66 < recall[0][0] < 0.667
recall = eval_recalls(
gts, proposals, proposal_nums=2, use_legacy_coordinate=False)
assert recall.shape == (1, 1)
assert 0.66 < recall[0][0] < 0.667
recall = eval_recalls(
gts,
proposals,
iou_thrs=[0.1, 0.9],
proposal_nums=2,
use_legacy_coordinate=False)
assert recall.shape == (1, 2)
assert recall[0][1] <= recall[0][0]
recall = eval_recalls(
gts,
proposals,
iou_thrs=[0.1, 0.9],
proposal_nums=2,
use_legacy_coordinate=True)
assert recall.shape == (1, 2)
assert recall[0][1] <= recall[0][0]
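# Hedged note on the asserted range: with three ground-truth boxes per image
# and only the top-2 proposals kept, two of the three gts can be recovered at
# the default IoU threshold, which gives recall = 2 / 3 ~= 0.6667 and explains
# the bound 0.66 < recall < 0.667.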
| 1,377 | 28.319149 | 73 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_metrics/test_mean_ap.py
|
import numpy as np
from mmdet.core.evaluation.mean_ap import eval_map, tpfp_default, tpfp_imagenet
det_bboxes = np.array([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])
def test_tpfp_imagenet():
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_tpfp_default():
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_eval_map():
    # 2 images and 2 classes
det_results = [[det_bboxes, det_bboxes], [det_bboxes, det_bboxes]]
labels = np.array([0, 1, 1])
labels_ignore = np.array([0, 1])
gt_info = {
'bboxes': gt_bboxes,
'bboxes_ignore': gt_ignore,
'labels': labels,
'labels_ignore': labels_ignore
}
annotations = [gt_info, gt_info]
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=True)
    assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293
| 2,327 | 25.454545 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_metrics/test_losses.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models import Accuracy, build_loss
def test_ce_loss():
# use_mask and use_sigmoid cannot be true at the same time
with pytest.raises(AssertionError):
loss_cfg = dict(
type='CrossEntropyLoss',
use_mask=True,
use_sigmoid=True,
loss_weight=1.0)
build_loss(loss_cfg)
# test loss with class weights
loss_cls_cfg = dict(
type='CrossEntropyLoss',
use_sigmoid=False,
class_weight=[0.8, 0.2],
loss_weight=1.0)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100, -100]])
fake_label = torch.Tensor([1]).long()
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))
loss_cls_cfg = dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)
loss_cls = build_loss(loss_cls_cfg)
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.))
def test_varifocal_loss():
# only sigmoid version of VarifocalLoss is implemented
with pytest.raises(AssertionError):
loss_cfg = dict(
type='VarifocalLoss', use_sigmoid=False, loss_weight=1.0)
build_loss(loss_cfg)
# test that alpha should be greater than 0
with pytest.raises(AssertionError):
loss_cfg = dict(
type='VarifocalLoss',
alpha=-0.75,
gamma=2.0,
use_sigmoid=True,
loss_weight=1.0)
build_loss(loss_cfg)
# test that pred and target should be of the same size
loss_cls_cfg = dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
reduction='mean',
loss_weight=1.0)
loss_cls = build_loss(loss_cls_cfg)
with pytest.raises(AssertionError):
fake_pred = torch.Tensor([[100.0, -100.0]])
fake_target = torch.Tensor([[1.0]])
loss_cls(fake_pred, fake_target)
# test the calculation
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100.0, -100.0]])
fake_target = torch.Tensor([[1.0, 0.0]])
assert torch.allclose(loss_cls(fake_pred, fake_target), torch.tensor(0.0))
# test the loss with weights
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[0.0, 100.0]])
fake_target = torch.Tensor([[1.0, 1.0]])
fake_weight = torch.Tensor([0.0, 1.0])
assert torch.allclose(
loss_cls(fake_pred, fake_target, fake_weight), torch.tensor(0.0))
def test_kd_loss():
# test that temperature should be greater than 1
with pytest.raises(AssertionError):
loss_cfg = dict(
type='KnowledgeDistillationKLDivLoss', loss_weight=1.0, T=0.5)
build_loss(loss_cfg)
# test that pred and target should be of the same size
loss_cls_cfg = dict(
type='KnowledgeDistillationKLDivLoss', loss_weight=1.0, T=1)
loss_cls = build_loss(loss_cls_cfg)
with pytest.raises(AssertionError):
fake_pred = torch.Tensor([[100, -100]])
fake_label = torch.Tensor([1]).long()
loss_cls(fake_pred, fake_label)
# test the calculation
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100.0, 100.0]])
fake_target = torch.Tensor([[1.0, 1.0]])
assert torch.allclose(loss_cls(fake_pred, fake_target), torch.tensor(0.0))
# test the loss with weights
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100.0, -100.0], [100.0, 100.0]])
fake_target = torch.Tensor([[1.0, 0.0], [1.0, 1.0]])
fake_weight = torch.Tensor([0.0, 1.0])
assert torch.allclose(
loss_cls(fake_pred, fake_target, fake_weight), torch.tensor(0.0))
def test_seesaw_loss():
# only softmax version of Seesaw Loss is implemented
with pytest.raises(AssertionError):
loss_cfg = dict(type='SeesawLoss', use_sigmoid=True, loss_weight=1.0)
build_loss(loss_cfg)
# test that cls_score.size(-1) == num_classes + 2
loss_cls_cfg = dict(
type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, num_classes=2)
loss_cls = build_loss(loss_cls_cfg)
# the length of fake_pred should be num_classes + 2 = 4
with pytest.raises(AssertionError):
fake_pred = torch.Tensor([[-100, 100]])
fake_label = torch.Tensor([1]).long()
loss_cls(fake_pred, fake_label)
# the length of fake_pred should be num_classes + 2 = 4
with pytest.raises(AssertionError):
fake_pred = torch.Tensor([[-100, 100, -100]])
fake_label = torch.Tensor([1]).long()
loss_cls(fake_pred, fake_label)
# test the calculation without p and q
loss_cls_cfg = dict(
type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, num_classes=2)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[-100, 100, -100, 100]])
fake_label = torch.Tensor([1]).long()
loss = loss_cls(fake_pred, fake_label)
assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
assert torch.allclose(loss['loss_cls_classes'], torch.tensor(0.))
# test the calculation with p and without q
loss_cls_cfg = dict(
type='SeesawLoss', p=1.0, q=0.0, loss_weight=1.0, num_classes=2)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[-100, 100, -100, 100]])
fake_label = torch.Tensor([0]).long()
loss_cls.cum_samples[0] = torch.exp(torch.Tensor([20]))
loss = loss_cls(fake_pred, fake_label)
assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
assert torch.allclose(loss['loss_cls_classes'], torch.tensor(180.))
# test the calculation with q and without p
loss_cls_cfg = dict(
type='SeesawLoss', p=0.0, q=1.0, loss_weight=1.0, num_classes=2)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[-100, 100, -100, 100]])
fake_label = torch.Tensor([0]).long()
loss = loss_cls(fake_pred, fake_label)
assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
assert torch.allclose(loss['loss_cls_classes'],
torch.tensor(200.) + torch.tensor(100.).log())
# test the others
loss_cls_cfg = dict(
type='SeesawLoss',
p=0.0,
q=1.0,
loss_weight=1.0,
num_classes=2,
return_dict=False)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100, -100, 100, -100]])
fake_label = torch.Tensor([0]).long()
loss = loss_cls(fake_pred, fake_label)
acc = loss_cls.get_accuracy(fake_pred, fake_label)
act = loss_cls.get_activation(fake_pred)
assert torch.allclose(loss, torch.tensor(0.))
assert torch.allclose(acc['acc_objectness'], torch.tensor(100.))
assert torch.allclose(acc['acc_classes'], torch.tensor(100.))
assert torch.allclose(act, torch.tensor([1., 0., 0.]))
def test_accuracy():
# test for empty pred
pred = torch.empty(0, 4)
label = torch.empty(0)
accuracy = Accuracy(topk=1)
acc = accuracy(pred, label)
assert acc.item() == 0
pred = torch.Tensor([[0.2, 0.3, 0.6, 0.5], [0.1, 0.1, 0.2, 0.6],
[0.9, 0.0, 0.0, 0.1], [0.4, 0.7, 0.1, 0.1],
[0.0, 0.0, 0.99, 0]])
# test for top1
true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
accuracy = Accuracy(topk=1)
acc = accuracy(pred, true_label)
assert acc.item() == 100
# test for top1 with score thresh=0.8
true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
accuracy = Accuracy(topk=1, thresh=0.8)
acc = accuracy(pred, true_label)
assert acc.item() == 40
# test for top2
accuracy = Accuracy(topk=2)
label = torch.Tensor([3, 2, 0, 0, 2]).long()
acc = accuracy(pred, label)
assert acc.item() == 100
# test for both top1 and top2
accuracy = Accuracy(topk=(1, 2))
true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
acc = accuracy(pred, true_label)
for a in acc:
assert a.item() == 100
# topk is larger than pred class number
with pytest.raises(AssertionError):
accuracy = Accuracy(topk=5)
accuracy(pred, true_label)
# wrong topk type
with pytest.raises(AssertionError):
accuracy = Accuracy(topk='wrong type')
accuracy(pred, true_label)
# label size is larger than required
with pytest.raises(AssertionError):
label = torch.Tensor([2, 3, 0, 1, 2, 0]).long() # size mismatch
accuracy = Accuracy()
accuracy(pred, label)
# wrong pred dimension
with pytest.raises(AssertionError):
accuracy = Accuracy()
accuracy(pred[:, :, None], true_label)
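# Hedged sketch of what Accuracy(topk=k) is checking, assuming the usual
# definition: a row counts as correct when the true label appears among the
# k highest-scoring classes (the thresh variant additionally requires the
# winning score to exceed the threshold).
def _topk_accuracy_sketch(pred, target, k=1):
    topk_idx = pred.topk(k, dim=1).indices
    correct = (topk_idx == target[:, None]).any(dim=1)
    return correct.float().mean() * 100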
| 8,694 | 34.929752 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/tests/test_metrics/test_box_overlap.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.core import BboxOverlaps2D, bbox_overlaps
from mmdet.core.evaluation.bbox_overlaps import \
bbox_overlaps as recall_overlaps
def test_bbox_overlaps_2d(eps=1e-7):
def _construct_bbox(num_bbox=None):
img_h = int(np.random.randint(3, 1000))
img_w = int(np.random.randint(3, 1000))
if num_bbox is None:
num_bbox = np.random.randint(1, 10)
x1y1 = torch.rand((num_bbox, 2))
x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
bboxes = torch.cat((x1y1, x2y2), -1)
bboxes[:, 0::2] *= img_w
bboxes[:, 1::2] *= img_h
return bboxes, num_bbox
# is_aligned is True, bboxes.size(-1) == 5 (include score)
self = BboxOverlaps2D()
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
bboxes1 = torch.cat((bboxes1, torch.rand((num_bbox, 1))), 1)
bboxes2 = torch.cat((bboxes2, torch.rand((num_bbox, 1))), 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert gious.size() == (num_bbox, ), gious.size()
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# is_aligned is True, bboxes1.size(-2) == 0
bboxes1 = torch.empty((0, 4))
bboxes2 = torch.empty((0, 4))
gious = self(bboxes1, bboxes2, 'giou', True)
assert gious.size() == (0, ), gious.size()
assert torch.all(gious == torch.empty((0, )))
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# is_aligned is True, and bboxes.ndims > 2
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
# test assertion when batch dim is not the same
with pytest.raises(AssertionError):
self(bboxes1, bboxes2.unsqueeze(0).repeat(3, 1, 1), 'giou', True)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, num_bbox)
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1, 1)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1, 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, 2, num_bbox)
# is_aligned is False
bboxes1, num_bbox1 = _construct_bbox()
bboxes2, num_bbox2 = _construct_bbox()
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (num_bbox1, num_bbox2)
# is_aligned is False, and bboxes.ndims > 2
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, num_bbox1, num_bbox2)
bboxes1 = bboxes1.unsqueeze(0)
bboxes2 = bboxes2.unsqueeze(0)
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (1, 2, num_bbox1, num_bbox2)
# is_aligned is False, bboxes1.size(-2) == 0
gious = self(torch.empty(1, 2, 0, 4), bboxes2, 'giou')
assert torch.all(gious == torch.empty(1, 2, 0, bboxes2.size(-2)))
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# test allclose between bbox_overlaps and the original official
# implementation.
bboxes1 = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
bboxes2 = torch.FloatTensor([
[0, 0, 10, 20],
[0, 10, 10, 19],
[10, 10, 20, 20],
])
gious = bbox_overlaps(bboxes1, bboxes2, 'giou', is_aligned=True, eps=eps)
gious = gious.numpy().round(4)
    # the ground truth was computed with four-decimal precision.
expected_gious = np.array([0.5000, -0.0500, -0.8214])
assert np.allclose(gious, expected_gious, rtol=0, atol=eps)
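    # Hedged worked example for the first expected value: boxes [0, 0, 10, 10]
    # and [0, 0, 10, 20] intersect with area 100 and union 200, so IoU = 0.5;
    # the smallest enclosing box is [0, 0, 10, 20] with area 200, hence
    # GIoU = IoU - (enclosing - union) / enclosing = 0.5 - 0 / 200 = 0.5.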
# test mode 'iof'
ious = bbox_overlaps(bboxes1, bboxes2, 'iof', is_aligned=True, eps=eps)
assert torch.all(ious >= -1) and torch.all(ious <= 1)
assert ious.size() == (bboxes1.size(0), )
ious = bbox_overlaps(bboxes1, bboxes2, 'iof', eps=eps)
assert torch.all(ious >= -1) and torch.all(ious <= 1)
assert ious.size() == (bboxes1.size(0), bboxes2.size(0))
def test_voc_recall_overlaps():
def _construct_bbox(num_bbox=None):
img_h = int(np.random.randint(3, 1000))
img_w = int(np.random.randint(3, 1000))
if num_bbox is None:
num_bbox = np.random.randint(1, 10)
x1y1 = torch.rand((num_bbox, 2))
x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
bboxes = torch.cat((x1y1, x2y2), -1)
bboxes[:, 0::2] *= img_w
bboxes[:, 1::2] *= img_h
return bboxes.numpy(), num_bbox
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
ious = recall_overlaps(
bboxes1, bboxes2, 'iou', use_legacy_coordinate=False)
assert ious.shape == (num_bbox, num_bbox)
assert np.all(ious >= -1) and np.all(ious <= 1)
ious = recall_overlaps(bboxes1, bboxes2, 'iou', use_legacy_coordinate=True)
assert ious.shape == (num_bbox, num_bbox)
assert np.all(ious >= -1) and np.all(ious <= 1)
| 5,316 | 38.385185 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/demo/video_demo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmdet.apis import inference_detector, init_detector
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
        help='Display interval in seconds; 0 blocks until a key press')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in mmcv.track_iter_progress(video_reader):
result = inference_detector(model, frame)
frame = model.show_result(frame, result, score_thr=args.score_thr)
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| 1,974 | 30.854839 | 76 |
py
|
DSLA-DSLA
|
DSLA-DSLA/demo/create_result_gif.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import mmcv
import numpy as np
try:
import imageio
except ImportError:
imageio = None
def parse_args():
parser = argparse.ArgumentParser(description='Create GIF for demo')
parser.add_argument(
        'image_dir',
        help='directory containing the result images '
        'generated by analyze_results.py')
parser.add_argument(
'--out',
type=str,
default='result.gif',
        help='path where the gif will be saved')
args = parser.parse_args()
return args
def _generate_batch_data(sampler, batch_size):
batch = []
for idx in sampler:
batch.append(idx)
if len(batch) == batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
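# Hedged usage note: _generate_batch_data chunks any iterable into
# batch_size-sized lists and yields the shorter remainder last, e.g.
# list(_generate_batch_data(range(5), 2)) -> [[0, 1], [2, 3], [4]].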
def create_gif(frames, gif_name, duration=2):
"""Create gif through imageio.
Args:
frames (list[ndarray]): Image frames
gif_name (str): Saved gif name
duration (int): Display interval (s),
Default: 2
"""
if imageio is None:
        raise RuntimeError('imageio is not installed, '
                           'please use "pip install imageio" to install it')
imageio.mimsave(gif_name, frames, 'GIF', duration=duration)
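# Hedged usage sketch, assuming `frames` is a list of HxWx3 uint8 arrays such
# as those returned by create_frame_by_matplotlib below:
# create_gif(frames, 'result.gif', duration=2)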
def create_frame_by_matplotlib(image_dir,
nrows=1,
fig_size=(300, 300),
font_size=15):
"""Create gif frame image through matplotlib.
Args:
image_dir (str): Root directory of result images
nrows (int): Number of rows displayed, Default: 1
fig_size (tuple): Figure size of the pyplot figure.
Default: (300, 300)
font_size (int): Font size of texts. Default: 15
Returns:
list[ndarray]: image frames
"""
result_dir_names = os.listdir(image_dir)
assert len(result_dir_names) == 2
# Longer length has higher priority
result_dir_names.reverse()
images_list = []
for dir_names in result_dir_names:
images_list.append(mmcv.scandir(osp.join(image_dir, dir_names)))
frames = []
for paths in _generate_batch_data(zip(*images_list), nrows):
fig, axes = plt.subplots(nrows=nrows, ncols=2)
fig.suptitle('Good/bad case selected according '
'to the COCO mAP of the single image')
det_patch = mpatches.Patch(color='salmon', label='prediction')
gt_patch = mpatches.Patch(color='royalblue', label='ground truth')
        # bbox_to_anchor may need to be fine-tuned
plt.legend(
handles=[det_patch, gt_patch],
bbox_to_anchor=(1, -0.18),
loc='lower right',
borderaxespad=0.)
if nrows == 1:
axes = [axes]
dpi = fig.get_dpi()
# set fig size and margin
fig.set_size_inches(
(fig_size[0] * 2 + fig_size[0] // 20) / dpi,
(fig_size[1] * nrows + fig_size[1] // 3) / dpi,
)
fig.tight_layout()
# set subplot margin
plt.subplots_adjust(
hspace=.05,
wspace=0.05,
left=0.02,
right=0.98,
bottom=0.02,
top=0.98)
for i, (path_tuple, ax_tuple) in enumerate(zip(paths, axes)):
image_path_left = osp.join(
osp.join(image_dir, result_dir_names[0], path_tuple[0]))
image_path_right = osp.join(
osp.join(image_dir, result_dir_names[1], path_tuple[1]))
image_left = mmcv.imread(image_path_left)
image_left = mmcv.rgb2bgr(image_left)
image_right = mmcv.imread(image_path_right)
image_right = mmcv.rgb2bgr(image_right)
if i == 0:
ax_tuple[0].set_title(
result_dir_names[0], fontdict={'size': font_size})
ax_tuple[1].set_title(
result_dir_names[1], fontdict={'size': font_size})
ax_tuple[0].imshow(
image_left, extent=(0, *fig_size, 0), interpolation='bilinear')
ax_tuple[0].axis('off')
ax_tuple[1].imshow(
image_right,
extent=(0, *fig_size, 0),
interpolation='bilinear')
ax_tuple[1].axis('off')
canvas = fig.canvas
s, (width, height) = canvas.print_to_buffer()
buffer = np.frombuffer(s, dtype='uint8')
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
img = rgb.astype('uint8')
frames.append(img)
return frames
def main():
args = parse_args()
frames = create_frame_by_matplotlib(args.image_dir)
create_gif(frames, args.out)
if __name__ == '__main__':
main()
| 4,930 | 29.067073 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/demo/webcam_demo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import torch
from mmdet.apis import inference_detector, init_detector
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
def main():
args = parse_args()
device = torch.device(args.device)
model = init_detector(args.config, args.checkpoint, device=device)
camera = cv2.VideoCapture(args.camera_id)
print('Press "Esc", "q" or "Q" to exit.')
while True:
ret_val, img = camera.read()
result = inference_detector(model, img)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
model.show_result(
img, result, score_thr=args.score_thr, wait_time=1, show=True)
if __name__ == '__main__':
main()
| 1,308 | 26.270833 | 78 |
py
|
DSLA-DSLA
|
DSLA-DSLA/demo/image_demo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(model, args.img, result[0], score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
| 1,719 | 32.72549 | 76 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py
|
_base_ = './retinanet_ghm_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 423 | 27.266667 | 76 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py
|
_base_ = './retinanet_ghm_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 201 | 27.857143 | 61 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py
|
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
loss_cls=dict(
_delete_=True,
type='GHMC',
bins=30,
momentum=0.75,
use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(
_delete_=True,
type='GHMR',
mu=0.02,
bins=10,
momentum=0.7,
loss_weight=10.0)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
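# Hedged note: GHM-C and GHM-R reweight classification and regression
# gradients by the inverse density of their gradient norms, accumulated in
# `bins` histogram buckets and smoothed with an EMA controlled by `momentum`.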
| 532 | 25.65 | 60 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py
|
_base_ = './retinanet_ghm_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 423 | 27.266667 | 76 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
| 242 | 29.375 | 74 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
|
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 216 | 35.166667 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 212 | 34.5 | 74 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
|
_base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 217 | 35.333333 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 216 | 35.166667 | 74 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
_delete_=True,
type='DeformRoIPoolPack',
output_size=7,
output_channels=256),
out_channels=256,
featmap_strides=[4, 8, 16, 32])))
| 408 | 30.461538 | 56 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 210 | 34.166667 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 228 | 37.166667 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
| 240 | 29.125 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 557 | 31.823529 | 76 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 221 | 36 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 222 | 36.166667 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 214 | 34.833333 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 216 | 35.166667 | 74 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
|
_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 215 | 35 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
|
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
| 211 | 34.333333 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
_delete_=True,
type='ModulatedDeformRoIPoolPack',
output_size=7,
output_channels=256),
out_channels=256,
featmap_strides=[4, 8, 16, 32])))
| 417 | 31.153846 | 56 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py
|
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| 591 | 28.6 | 76 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/htc/htc_r50_fpn_20e_coco.py
|
_base_ = './htc_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| 140 | 27.2 | 53 |
py
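
The '20e' schedule keeps the base LR until epoch 16, then decays it by 10x at epochs 16 and 19. A quick sketch of the resulting piecewise-constant schedule, assuming mmcv's default step decay factor of 0.1 and a base LR of 0.02 (the usual two-stage detector default; both values are assumptions here):

def step_lr(base_lr, epoch, steps=(16, 19), gamma=0.1):
    # Piecewise-constant LR as produced by lr_config = dict(step=[16, 19]).
    return base_lr * gamma ** sum(epoch >= s for s in steps)

for e in (0, 15, 16, 18, 19):
    print(e, step_lr(0.02, e))
# 0.02 up to epoch 15, 0.002 for epochs 16-18, 0.0002 for epoch 19.
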
|
DSLA-DSLA
|
DSLA-DSLA/configs/htc/htc_without_semantic_r50_fpn_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='HybridTaskCascade',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
]),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
| 8,333 | 34.164557 | 79 |
py
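
A minimal sketch of instantiating this detector from the config, assuming mmdet 2.x is installed and the file exists at the path shown (in 2.x the train_cfg/test_cfg embedded in the model dict are picked up automatically):

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/htc/htc_without_semantic_r50_fpn_1x_coco.py')
model = build_detector(cfg.model)
model.init_weights()

n_params = sum(p.numel() for p in model.parameters())
print(f'{type(model).__name__}: {n_params / 1e6:.1f}M parameters')
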
|
DSLA-DSLA
|
DSLA-DSLA/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py
|
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| 591 | 28.6 | 76 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py
|
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(
type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
data = dict(
samples_per_gpu=1, workers_per_gpu=1, train=dict(pipeline=train_pipeline))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| 1,489 | 32.863636 | 79 |
py
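
multiscale_mode='range' with img_scale=[(1600, 400), (1600, 1400)] samples a fresh scale per image: the short edge is drawn uniformly from [400, 1400] while the long edge stays capped at 1600, which is the 'mstrain_400_1400' in the file name. A sketch of that sampling step (mirroring, not reusing, mmdet's Resize behavior):

import numpy as np


def sample_scale(img_scales=((1600, 400), (1600, 1400)), rng=np.random):
    longs = [max(s) for s in img_scales]
    shorts = [min(s) for s in img_scales]
    long_edge = rng.randint(min(longs), max(longs) + 1)
    short_edge = rng.randint(min(shorts), max(shorts) + 1)
    return long_edge, short_edge


print(sample_scale())  # e.g. (1600, 973); long edge is fixed since both are 1600
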
|
DSLA-DSLA
|
DSLA-DSLA/configs/htc/htc_r50_fpn_1x_coco.py
|
_base_ = './htc_without_semantic_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))))
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(
seg_prefix=data_root + 'stuffthingmaps/train2017/',
pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
| 1,998 | 34.070175 | 79 |
py
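
The semantic branch consumes stuff-thing masks at 1/8 resolution: SegRescale(scale_factor=1/8) in the train pipeline matches featmap_strides=[8] in the semantic RoI extractor, so one mask pixel corresponds to one stride-8 feature location. A small sketch of that rescaling with mmcv, using random stand-in data for the label map:

import numpy as np
import mmcv

gt_semantic_seg = np.random.randint(0, 183, size=(800, 1344), dtype=np.uint8)
# Nearest-neighbour interpolation keeps the values valid class labels.
small = mmcv.imrescale(gt_semantic_seg, 1 / 8, interpolation='nearest')
print(small.shape)  # (100, 168)
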
|
DSLA-DSLA
|
DSLA-DSLA/configs/htc/htc_r101_fpn_20e_coco.py
|
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
| 295 | 28.6 | 61 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py
|
_base_ = './mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Use RepeatDataset to speed up training
# change repeat times from 4 (for 100 epochs) to 16 (for 400 epochs)
data = dict(train=dict(times=4 * 4))
lr_config = dict(warmup_iters=500 * 4)
| 261 | 36.428571 | 74 |
py
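
The 400-epoch variant does not touch max_epochs; it repeats the dataset instead. The 100e base already wraps COCO in a RepeatDataset with times=4, so times=4 * 4 makes each runner 'epoch' cover 16 passes over the data, and warmup_iters is stretched by the same factor of 4. A toy sketch of the repeat mechanism (a simplified stand-in, not mmdet's actual class):

from torch.utils.data import Dataset


class RepeatDataset(Dataset):
    # Simplified: indexing wraps around the underlying dataset.

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times

    def __len__(self):
        return self.times * len(self.dataset)

    def __getitem__(self, idx):
        return self.dataset[idx % len(self.dataset)]


class Toy(Dataset):
    def __len__(self):
        return 3

    def __getitem__(self, idx):
        return idx


repeated = RepeatDataset(Toy(), times=16)
print(len(repeated))  # 48: one runner epoch = 16 passes over the data
print(repeated[40])   # 1: index wraps modulo the underlying length
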
|
DSLA-DSLA
|
DSLA-DSLA/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Use RepeatDataset to speed up training
# change repeat times from 4 (for 100 epochs) to 2 (for 50 epochs)
data = dict(train=dict(times=2))
| 208 | 33.833333 | 66 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
| 893 | 37.869565 | 77 |
py
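
Because these baselines train from scratch (frozen_stages=-1, norm_eval=False), BN statistics must be learned across GPUs, hence SyncBN everywhere and MMSyncBN in the heads, which tolerates the empty RoI batches that plain SyncBN cannot handle (see the issue linked in the config comments). Outside of these configs, the same conversion can be applied post hoc with plain PyTorch; a minimal sketch:

import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(3, 8, 3, padding=1),
    nn.BatchNorm2d(8),
    nn.ReLU())
# Replaces every BatchNorm*d module; actual synchronization happens only
# once a distributed process group is initialized.
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
print(model[1])
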
|
DSLA-DSLA
|
DSLA-DSLA/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# use FP16
fp16 = dict(loss_scale=512.)
| 107 | 26 | 66 |
py
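
fp16 = dict(loss_scale=512.) turns on mmcv's mixed-precision hook with a static loss scale: the loss is multiplied by 512 before backward so small fp16 gradients do not underflow, and gradients are divided by 512 again before the optimizer step. The core arithmetic, sketched in plain PyTorch:

import torch

scale = 512.0
w = torch.nn.Parameter(torch.tensor(1.0))
loss = (w * 2.0 - 1.0) ** 2

(loss * scale).backward()  # scaled backward pass
w.grad.div_(scale)         # unscale before stepping the optimizer
print(w.grad)              # tensor(4.), identical to an unscaled backward
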
|
DSLA-DSLA
|
DSLA-DSLA/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py
|
_base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
fp16 = dict(loss_scale=512.)
| 102 | 33.333333 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
file_client_args = dict(backend='disk')
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
image_size = (1024, 1024)
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=image_size,
ratio_range=(0.1, 2.0),
multiscale_mode='range',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=image_size),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# Use RepeatDataset to speed up training
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
| 2,703 | 32.382716 | 77 |
py
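
Large-scale jitter (LSJ) is this pipeline's Resize with ratio_range=(0.1, 2.0) followed by an absolute-range RandomCrop and Pad to a fixed 1024x1024 canvas: every image is rescaled by a random factor, then cropped or padded to the target size. A rough numpy/mmcv sketch of the geometry, ignoring boxes and masks:

import numpy as np
import mmcv


def lsj(img, size=1024, ratio_range=(0.1, 2.0), rng=np.random):
    # Random rescale, then random crop (if larger) and zero pad (if smaller).
    img = mmcv.imrescale(img, rng.uniform(*ratio_range))
    h, w = img.shape[:2]
    y0 = rng.randint(0, max(h - size, 0) + 1)
    x0 = rng.randint(0, max(w - size, 0) + 1)
    img = img[y0:y0 + size, x0:x0 + size]
    canvas = np.zeros((size, size, 3), dtype=img.dtype)
    canvas[:img.shape[0], :img.shape[1]] = img
    return canvas


print(lsj(np.zeros((800, 1333, 3), dtype=np.uint8)).shape)  # (1024, 1024, 3)
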
|
DSLA-DSLA
|
DSLA-DSLA/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
|
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 340 | 36.888889 | 72 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py
|
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 217 | 30.142857 | 61 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='RepPointsDetector',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5),
bbox_head=dict(
type='RepPointsHead',
num_classes=80,
in_channels=256,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
transform_method='moment'),
# training and testing settings
train_cfg=dict(
init=dict(
assigner=dict(type='PointAssigner', scale=4, pos_num=1),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
optimizer = dict(lr=0.01)
| 2,065 | 29.382353 | 79 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py
|
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(bbox_head=dict(transform_method='partial_minmax'))
| 126 | 41.333333 | 63 |
py
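
The transform_method variants differ only in how the learned point set is read out as a box: 'minmax' takes the tightest enclosing rectangle, 'partial_minmax' applies min/max to the first four points only, and 'moment' (the default in the base config above) derives the box from the points' mean and standard deviation via a learned scale. A toy sketch of the minmax readout (illustrative only, not the exact RepPointsHead code):

import torch


def minmax_points_to_bbox(pts):
    # pts: (N, num_points, 2) -> (N, 4) boxes in x1, y1, x2, y2 order.
    x, y = pts[..., 0], pts[..., 1]
    return torch.stack(
        [x.min(dim=1).values, y.min(dim=1).values,
         x.max(dim=1).values, y.max(dim=1).values], dim=1)


pts = torch.rand(2, 9, 2) * 100
print(minmax_points_to_bbox(pts))
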
|
DSLA-DSLA
|
DSLA-DSLA/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py
|
_base_ = './reppoints_moment_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg))
optimizer = dict(lr=0.01)
| 215 | 42.2 | 77 |
py
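
The gn-neck+head variants swap only the norm layers: norm_cfg is threaded into the FPN and head, where mmcv's ConvModule assembles conv + norm + activation from it. A minimal sketch of what a single such block looks like:

import torch
from mmcv.cnn import ConvModule

norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
m = ConvModule(256, 256, 3, padding=1, norm_cfg=norm_cfg)
print(m.norm)  # GroupNorm(32, 256, eps=1e-05, affine=True)
print(m(torch.randn(2, 256, 8, 8)).shape)  # torch.Size([2, 256, 8, 8])
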
|
DSLA-DSLA
|
DSLA-DSLA/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py
|
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(
bbox_head=dict(transform_method='minmax', use_grid_points=True),
# training and testing settings
train_cfg=dict(
init=dict(
assigner=dict(
_delete_=True,
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1))))
| 452 | 31.357143 | 68 |
py
|
DSLA-DSLA
|
DSLA-DSLA/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py
|
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
| 148 | 36.25 | 61 |
py