repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_bbox_heads/test_scnet_bbox_head.py
|
import unittest

import torch

from mmdet.models.roi_heads.bbox_heads import SCNetBBoxHead


class TestSCNetBBoxHead(unittest.TestCase):
    """Forward test for SCNetBBoxHead with/without the shared feature."""

    def test_forward(self):
        # Two samples of a single-channel 16x16 RoI feature map.
        x = torch.rand((2, 1, 16, 16))
        bbox_head = SCNetBBoxHead(
            num_shared_fcs=2,
            in_channels=1,
            roi_feat_size=16,
            conv_out_channels=1,
            fc_out_channels=256,
        )
        # Without the shared feature two outputs are returned
        # (presumably cls_score and bbox_pred).
        results = bbox_head(x, return_shared_feat=False)
        self.assertEqual(len(results), 2)
        # Requesting the shared feature adds a third output.
        results = bbox_head(x, return_shared_feat=True)
        self.assertEqual(len(results), 3)
| 600 | 25.130435 | 59 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_bbox_heads/test_double_bbox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase

import torch
from parameterized import parameterized

from mmdet.models.roi_heads.bbox_heads import DoubleConvFCBBoxHead


class TestDoubleBboxHead(TestCase):
    """Forward smoke test for DoubleConvFCBBoxHead on CPU and CUDA."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward_loss(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        double_bbox_head = DoubleConvFCBBoxHead(
            num_convs=4,
            num_fcs=2,
            in_channels=1,
            conv_out_channels=4,
            fc_out_channels=4)
        double_bbox_head = double_bbox_head.to(device=device)

        num_samples = 4
        feats = torch.rand((num_samples, 1, 7, 7)).to(device)
        # Double head consumes separate cls/reg branches; reuse one tensor.
        double_bbox_head(x_cls=feats, x_reg=feats)
| 876 | 28.233333 | 72 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_bbox_heads/test_sabl_bbox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase

import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData

from mmdet.models.roi_heads.bbox_heads import SABLHead
from mmdet.models.task_modules.samplers import SamplingResult


class TestSABLBboxHead(TestCase):
    """Tests for SABLHead: init, result decoding and bbox refinement."""

    def test_init(self):
        bbox_head = SABLHead(
            cls_in_channels=1,
            cls_out_channels=1,
            reg_in_channels=1,
            reg_offset_out_channels=1,
            reg_cls_out_channels=1,
            num_classes=4)
        self.assertTrue(bbox_head.fc_cls)
        self.assertTrue(hasattr(bbox_head, 'reg_cls_fcs'))
        self.assertTrue(hasattr(bbox_head, 'reg_offset_fcs'))
        # SABL regresses (bucket cls, offset) pairs, so no single fc_reg.
        self.assertFalse(hasattr(bbox_head, 'fc_reg'))

    def test_bbox_head_get_results(self):
        num_classes = 6
        bbox_head = SABLHead(reg_class_agnostic=True, num_classes=num_classes)
        s = 128
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        num_samples = 2
        rois = [torch.rand((num_samples, 5))]
        cls_scores = [torch.rand((num_samples, num_classes + 1))]
        # SABL bbox preds are (bucket cls scores, bucket offsets) tuples.
        bbox_preds = [(torch.rand(
            (num_samples, 28)), torch.rand((num_samples, 28)))]

        # with nms
        rcnn_test_cfg = ConfigDict(
            score_thr=0.,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertLessEqual(len(result_list[0]), num_samples * num_classes)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)
        self.assertEqual(len(result_list[0].scores.shape), 1)
        self.assertEqual(len(result_list[0].labels.shape), 1)

        # without nms: raw per-roi scores are kept and no labels are set
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples)
        self.assertEqual(result_list[0].scores.shape, cls_scores[0].shape)
        self.assertIsNone(result_list[0].get('label', None))

        # num_samples is 0: empty inputs must still yield valid outputs
        num_samples = 0
        rois = [torch.rand((num_samples, 5))]
        cls_scores = [torch.rand((num_samples, num_classes + 1))]
        bbox_preds = [(torch.rand(
            (num_samples, 28)), torch.rand((num_samples, 28)))]

        # with nms
        rcnn_test_cfg = ConfigDict(
            score_thr=0.,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)

        # without nms
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0)
        self.assertIsNone(result_list[0].get('label', None))

    def test_bbox_head_refine_bboxes(self):
        num_classes = 8
        bbox_head = SABLHead(reg_class_agnostic=True, num_classes=num_classes)
        s = 20
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        sampling_results = [SamplingResult.random()]
        num_samples = 20
        rois = torch.rand((num_samples, 4))
        # Prepend the batch-image index column expected by 5-col rois.
        roi_img_ids = torch.zeros(num_samples, 1)
        rois = torch.cat((roi_img_ids, rois), dim=1)
        cls_scores = torch.rand((num_samples, num_classes + 1))
        cls_preds = torch.rand((num_samples, 28))
        offset_preds = torch.rand((num_samples, 28))
        labels = torch.randint(0, num_classes + 1, (num_samples, )).long()
        bbox_targets = (labels, None, None, None)
        bbox_results = dict(
            rois=rois,
            bbox_pred=(cls_preds, offset_preds),
            cls_score=cls_scores,
            bbox_targets=bbox_targets)
        bbox_list = bbox_head.refine_bboxes(
            sampling_results=sampling_results,
            bbox_results=bbox_results,
            batch_img_metas=img_metas)
        # Refinement may drop boxes but never adds any.
        self.assertGreaterEqual(num_samples, len(bbox_list[0]))
        self.assertIsInstance(bbox_list[0], InstanceData)
        self.assertEqual(bbox_list[0].bboxes.shape[1], 4)
| 5,052 | 35.615942 | 78 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_bbox_heads/test_bbox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase

import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData

from mmdet.models.roi_heads.bbox_heads import (BBoxHead, Shared2FCBBoxHead,
                                               Shared4Conv1FCBBoxHead)
from mmdet.models.task_modules.samplers import SamplingResult


class TestBboxHead(TestCase):
    """Tests for BBoxHead variants: init, result decoding, refinement."""

    def test_init(self):
        # Shared2FCBBoxHead: two shared fully-connected layers.
        bbox_head = Shared2FCBBoxHead(
            in_channels=1, fc_out_channels=1, num_classes=4)
        self.assertTrue(bbox_head.fc_cls)
        self.assertTrue(bbox_head.fc_reg)
        self.assertEqual(len(bbox_head.shared_fcs), 2)

        # Shared4Conv1FCBBoxHead: four shared convs followed by one fc.
        bbox_head = Shared4Conv1FCBBoxHead(
            in_channels=1, fc_out_channels=1, num_classes=4)
        self.assertTrue(bbox_head.fc_cls)
        self.assertTrue(bbox_head.fc_reg)
        self.assertEqual(len(bbox_head.shared_convs), 4)
        self.assertEqual(len(bbox_head.shared_fcs), 1)

    def test_bbox_head_get_results(self):
        num_classes = 6
        bbox_head = BBoxHead(reg_class_agnostic=True, num_classes=num_classes)
        s = 128
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        num_samples = 2
        rois = [torch.rand((num_samples, 5))]
        cls_scores = [torch.rand((num_samples, num_classes + 1))]
        bbox_preds = [torch.rand((num_samples, 4))]

        # with nms
        rcnn_test_cfg = ConfigDict(
            score_thr=0.,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertLessEqual(len(result_list[0]), num_samples * num_classes)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)
        self.assertEqual(len(result_list[0].scores.shape), 1)
        self.assertEqual(len(result_list[0].labels.shape), 1)

        # without nms: raw boxes/scores pass through, no labels assigned
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples)
        self.assertEqual(result_list[0].bboxes.shape, bbox_preds[0].shape)
        self.assertEqual(result_list[0].scores.shape, cls_scores[0].shape)
        self.assertIsNone(result_list[0].get('label', None))

        # num_samples is 0: empty inputs must still yield valid outputs
        num_samples = 0
        rois = [torch.rand((num_samples, 5))]
        cls_scores = [torch.rand((num_samples, num_classes + 1))]
        bbox_preds = [torch.rand((num_samples, 4))]

        # with nms
        rcnn_test_cfg = ConfigDict(
            score_thr=0.,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)

        # without nms
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0)
        self.assertEqual(result_list[0].bboxes.shape, bbox_preds[0].shape)
        self.assertIsNone(result_list[0].get('label', None))

    def test_bbox_head_refine_bboxes(self):
        num_classes = 6
        bbox_head = BBoxHead(reg_class_agnostic=True, num_classes=num_classes)
        s = 128
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        sampling_results = [SamplingResult.random()]
        num_samples = 20
        rois = torch.rand((num_samples, 4))
        # Prepend the batch-image index column expected by 5-col rois.
        roi_img_ids = torch.zeros(num_samples, 1)
        rois = torch.cat((roi_img_ids, rois), dim=1)
        cls_scores = torch.rand((num_samples, num_classes + 1))
        bbox_preds = torch.rand((num_samples, 4))
        labels = torch.randint(0, num_classes + 1, (num_samples, )).long()
        bbox_targets = (labels, None, None, None)
        bbox_results = dict(
            rois=rois,
            bbox_pred=bbox_preds,
            cls_score=cls_scores,
            bbox_targets=bbox_targets)
        bbox_list = bbox_head.refine_bboxes(
            sampling_results=sampling_results,
            bbox_results=bbox_results,
            batch_img_metas=img_metas)
        # Refinement may drop boxes but never adds any.
        self.assertGreaterEqual(num_samples, len(bbox_list[0]))
        self.assertIsInstance(bbox_list[0], InstanceData)
        self.assertEqual(bbox_list[0].bboxes.shape[1], 4)
| 5,287 | 36.503546 | 78 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_bbox_heads/test_multi_instance_bbox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase

import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData

from mmdet.models.roi_heads.bbox_heads import MultiInstanceBBoxHead


class TestMultiInstanceBBoxHead(TestCase):
    """Tests for MultiInstanceBBoxHead init and result decoding."""

    def test_init(self):
        bbox_head = MultiInstanceBBoxHead(
            num_instance=2,
            with_refine=True,
            num_shared_fcs=2,
            in_channels=1,
            fc_out_channels=1,
            num_classes=4)
        self.assertTrue(bbox_head.shared_fcs_ref)
        self.assertTrue(bbox_head.fc_reg)
        self.assertTrue(bbox_head.fc_cls)
        self.assertEqual(len(bbox_head.shared_fcs), 2)
        # One cls/reg branch per predicted instance.
        self.assertEqual(len(bbox_head.fc_reg), 2)
        self.assertEqual(len(bbox_head.fc_cls), 2)

    def test_bbox_head_get_results(self):
        num_classes = 1
        num_instance = 2
        bbox_head = MultiInstanceBBoxHead(
            num_instance=num_instance,
            num_shared_fcs=2,
            reg_class_agnostic=True,
            num_classes=num_classes)
        s = 128
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        num_samples = 2
        rois = [torch.rand((num_samples, 5))]
        # Per-instance predictions are concatenated along the channel dim.
        cls_scores = []
        bbox_preds = []
        for k in range(num_instance):
            cls_scores.append(torch.rand((num_samples, num_classes + 1)))
            bbox_preds.append(torch.rand((num_samples, 4)))
        cls_scores = [torch.cat(cls_scores, dim=1)]
        bbox_preds = [torch.cat(bbox_preds, dim=1)]

        # with nms
        rcnn_test_cfg = ConfigDict(
            nms=dict(type='nms', iou_threshold=0.5),
            score_thr=0.01,
            max_per_img=500)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertLessEqual(
            len(result_list[0]), num_samples * num_instance * num_classes)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)
        self.assertEqual(len(result_list[0].scores.shape), 1)
        self.assertEqual(len(result_list[0].labels.shape), 1)

        # without nms: every instance of every sample is kept
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples * num_instance)
        self.assertIsNone(result_list[0].get('label', None))

        # num_samples is 0: empty inputs must still yield valid outputs
        num_samples = 0
        rois = [torch.rand((num_samples, 5))]
        cls_scores = []
        bbox_preds = []
        for k in range(num_instance):
            cls_scores.append(torch.rand((num_samples, num_classes + 1)))
            bbox_preds.append(torch.rand((num_samples, 4)))
        cls_scores = [torch.cat(cls_scores, dim=1)]
        bbox_preds = [torch.cat(bbox_preds, dim=1)]

        # with nms
        rcnn_test_cfg = ConfigDict(
            score_thr=0.,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)

        # without nms
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0 * num_instance)
        self.assertIsNone(result_list[0].get('label', None))
| 4,203 | 34.327731 | 74 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_fused_semantic_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase

import torch
from parameterized import parameterized
from torch import Tensor

from mmdet.models.roi_heads.mask_heads import FusedSemanticHead


class TestFusedSemanticHead(TestCase):
    """Forward + loss smoke test for FusedSemanticHead."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward_loss(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        semantic_head = FusedSemanticHead(
            num_ins=5,
            fusion_level=1,
            in_channels=4,
            conv_out_channels=4,
            num_classes=6)
        # A 5-level pyramid with spatial sizes 16, 8, 4, 2, 1.
        feats = [
            torch.rand((1, 4, 32 // 2**(i + 1), 32 // 2**(i + 1)))
            for i in range(5)
        ]
        mask_pred, x = semantic_head(feats)
        labels = torch.randint(0, 6, (1, 1, 64, 64))
        loss = semantic_head.loss(mask_pred, labels)
        self.assertIsInstance(loss, Tensor)
| 1,013 | 28.823529 | 72 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_maskiou_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase

import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized

from mmdet.models.roi_heads.mask_heads import MaskIoUHead
from mmdet.models.utils import unpack_gt_instances
from mmdet.structures.mask import mask_target
from mmdet.testing import (demo_mm_inputs, demo_mm_proposals,
                           demo_mm_sampling_results)


class TestMaskIoUHead(TestCase):
    """Tests for MaskIoUHead loss/target computation and prediction."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_mask_iou_head_loss_and_target(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        mask_iou_head = MaskIoUHead(num_classes=4)
        mask_iou_head.to(device=device)
        s = 256
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        train_cfg = ConfigDict(dict(mask_size=28, mask_thr_binary=0.5))

        # prepare ground truth
        (batch_gt_instances, batch_gt_instances_ignore,
         _) = unpack_gt_instances(batch_data_samples)
        sampling_results = demo_mm_sampling_results(
            proposals_list=proposals_list,
            batch_gt_instances=batch_gt_instances,
            batch_gt_instances_ignore=batch_gt_instances_ignore)

        # prepare mask feats, pred and target
        pos_proposals = [res.pos_priors for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]
        gt_masks = [res.masks for res in batch_gt_instances]
        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
                                   gt_masks, train_cfg)
        mask_feats = torch.rand((mask_targets.size(0), 256, 14, 14)).to(device)
        mask_preds = torch.rand((mask_targets.size(0), 4, 28, 28)).to(device)
        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
        # Select each sample's prediction at its positive gt label.
        pos_mask_pred = mask_preds[range(mask_preds.size(0)), pos_labels]
        mask_iou_pred = mask_iou_head(mask_feats, pos_mask_pred)
        pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
                                          pos_labels]
        mask_iou_head.loss_and_target(pos_mask_iou_pred, pos_mask_pred,
                                      mask_targets, sampling_results,
                                      batch_gt_instances, train_cfg)

    @parameterized.expand(['cpu', 'cuda'])
    def test_mask_iou_head_predict_by_feat(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        mask_iou_head = MaskIoUHead(num_classes=4)
        mask_iou_head.to(device=device)
        s = 128
        num_samples = 2
        num_classes = 4
        img_metas = {
            'img_shape': (s, s, 3),
            'scale_factor': (1, 1),
            'ori_shape': (s, s, 3)
        }
        results = InstanceData(metainfo=img_metas)
        results.bboxes = torch.rand((num_samples, 4)).to(device)
        results.scores = torch.rand((num_samples, )).to(device)
        results.labels = torch.randint(
            num_classes, (num_samples, ), dtype=torch.long).to(device)
        mask_feats = torch.rand((num_samples, 256, 14, 14)).to(device)
        mask_preds = torch.rand((num_samples, num_classes, 28, 28)).to(device)
        mask_iou_preds = mask_iou_head(
            mask_feats, mask_preds[range(results.labels.size(0)),
                                   results.labels])
        mask_iou_head.predict_by_feat(
            mask_iou_preds=[mask_iou_preds], results_list=[results])
| 4,094 | 39.544554 | 79 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_scnet_mask_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase

import torch
from parameterized import parameterized
from torch import Tensor

from mmdet.models.roi_heads.mask_heads import SCNetMaskHead


class TestSCNetMaskHead(TestCase):
    """Forward smoke test for SCNetMaskHead with residual convs."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        num_classes = 6
        mask_head = SCNetMaskHead(
            conv_to_res=True,
            num_convs=1,
            in_channels=1,
            conv_out_channels=1,
            num_classes=num_classes)
        x = torch.rand((1, 1, 10, 10))
        results = mask_head(x)
        self.assertIsInstance(results, Tensor)
| 828 | 26.633333 | 72 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_global_context_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase

import torch
from parameterized import parameterized
from torch import Tensor

from mmdet.models.roi_heads.mask_heads import GlobalContextHead


class TestGlobalContextHead(TestCase):
    """Forward + loss smoke test for GlobalContextHead."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward_loss(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        head = GlobalContextHead(
            num_convs=1, in_channels=4, conv_out_channels=4, num_classes=10)
        # A 5-level pyramid with spatial sizes 32, 16, 8, 4, 2.
        feats = [
            torch.rand((1, 4, 64 // 2**(i + 1), 64 // 2**(i + 1)))
            for i in range(5)
        ]
        mc_pred, x = head(feats)
        labels = [torch.randint(0, 10, (10, ))]
        loss = head.loss(mc_pred, labels)
        self.assertIsInstance(loss, Tensor)
| 917 | 28.612903 | 76 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_scnet_semantic_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase

import torch
from parameterized import parameterized
from torch import Tensor

from mmdet.models.roi_heads.mask_heads import SCNetSemanticHead


class TestSCNetSemanticHead(TestCase):
    """Forward + loss smoke test for SCNetSemanticHead."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward_loss(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        semantic_head = SCNetSemanticHead(
            num_ins=5,
            fusion_level=1,
            in_channels=4,
            conv_out_channels=4,
            num_classes=6)
        # A 5-level pyramid with spatial sizes 16, 8, 4, 2, 1.
        feats = [
            torch.rand((1, 4, 32 // 2**(i + 1), 32 // 2**(i + 1)))
            for i in range(5)
        ]
        mask_pred, x = semantic_head(feats)
        labels = torch.randint(0, 6, (1, 1, 64, 64))
        loss = semantic_head.loss(mask_pred, labels)
        self.assertIsInstance(loss, Tensor)
| 1,013 | 28.823529 | 72 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_htc_mask_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase

import torch
from parameterized import parameterized
from torch import Tensor

from mmdet.models.roi_heads.mask_heads import HTCMaskHead


class TestHTCMaskHead(TestCase):
    """Forward tests for HTCMaskHead output combinations."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        num_classes = 6
        mask_head = HTCMaskHead(
            with_conv_res=True,
            num_convs=1,
            in_channels=1,
            conv_out_channels=1,
            num_classes=num_classes)
        x = torch.rand((1, 1, 10, 10))
        res_feat = torch.rand((1, 1, 10, 10))

        # At least one of logits/feat must be requested.
        with self.assertRaises(AssertionError):
            mask_head(x, return_logits=False, return_feat=False)

        # Both outputs by default, with or without a residual feature.
        results = mask_head(x)
        self.assertEqual(len(results), 2)
        results = mask_head(x, res_feat=res_feat)
        self.assertEqual(len(results), 2)

        # Single-output modes return a bare tensor.
        results = mask_head(x, return_logits=False)
        self.assertIsInstance(results, Tensor)
        results = mask_head(x, return_feat=False)
        self.assertIsInstance(results, Tensor)
        results = mask_head(x, res_feat=res_feat, return_logits=False)
        self.assertIsInstance(results, Tensor)
        results = mask_head(x, res_feat=res_feat, return_feat=False)
        self.assertIsInstance(results, Tensor)
| 1,504 | 31.021277 | 72 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_fcn_mask_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase

import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized

from mmdet.models.roi_heads.mask_heads import FCNMaskHead


class TestFCNMaskHead(TestCase):
    """Tests for FCNMaskHead.predict_by_feat segmentation decoding."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_get_seg_masks(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        num_classes = 6
        mask_head = FCNMaskHead(
            num_convs=1,
            in_channels=1,
            conv_out_channels=1,
            num_classes=num_classes)
        rcnn_test_cfg = ConfigDict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)
        s = 128
        img_metas = {
            'img_shape': (s, s, 3),
            'scale_factor': (1, 1),
            'ori_shape': (s, s, 3)
        }
        result = InstanceData(metainfo=img_metas)
        num_samples = 2
        mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
        result.bboxes = torch.rand((num_samples, 4)).to(device)
        result.labels = torch.randint(
            num_classes, (num_samples, ), dtype=torch.long).to(device)
        mask_head.to(device=device)
        result_list = mask_head.predict_by_feat(
            mask_preds=tuple(mask_pred),
            results_list=[result],
            batch_img_metas=[img_metas],
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples)
        self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))

        # test with activate_map, `mask_pred` has been activated before
        num_samples = 2
        mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
        mask_pred = [m.sigmoid().detach() for m in mask_pred]
        result.bboxes = torch.rand((num_samples, 4)).to(device)
        result.labels = torch.randint(
            num_classes, (num_samples, ), dtype=torch.long).to(device)
        mask_head.to(device=device)
        result_list = mask_head.predict_by_feat(
            mask_preds=tuple(mask_pred),
            results_list=[result],
            batch_img_metas=[img_metas],
            rcnn_test_cfg=rcnn_test_cfg,
            activate_map=True)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples)
        self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))

        # num_samples is 0: empty predictions must still decode cleanly
        num_samples = 0
        result = InstanceData(metainfo=img_metas)
        mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
        result.bboxes = torch.zeros((num_samples, 4)).to(device)
        result.labels = torch.zeros((num_samples, )).to(device)
        result_list = mask_head.predict_by_feat(
            mask_preds=tuple(mask_pred),
            results_list=[result],
            batch_img_metas=[img_metas],
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples)
        self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
| 3,432 | 37.573034 | 79 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_feature_relay_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase

import torch
from parameterized import parameterized
from torch import Tensor

from mmdet.models.roi_heads.mask_heads import FeatureRelayHead


class TestFeatureRelayHead(TestCase):
    """Forward test for FeatureRelayHead, including the empty-input case."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        mask_head = FeatureRelayHead(in_channels=10, out_conv_channels=10)
        x = torch.rand((1, 10))
        results = mask_head(x)
        self.assertIsInstance(results, Tensor)

        # Empty input yields None rather than an empty tensor.
        x = torch.empty((0, 10))
        results = mask_head(x)
        self.assertEqual(results, None)
| 795 | 28.481481 | 74 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_grid_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase

import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized

from mmdet.models.roi_heads.mask_heads import GridHead
from mmdet.models.utils import unpack_gt_instances
from mmdet.testing import (demo_mm_inputs, demo_mm_proposals,
                           demo_mm_sampling_results)


class TestGridHead(TestCase):
    """Tests for GridHead loss computation and prediction decoding."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_grid_head_loss(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        grid_head = GridHead()
        grid_head.to(device=device)
        s = 256
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        train_cfg = ConfigDict(dict(pos_radius=1))

        # prepare ground truth
        (batch_gt_instances, batch_gt_instances_ignore,
         _) = unpack_gt_instances(batch_data_samples)
        sampling_results = demo_mm_sampling_results(
            proposals_list=proposals_list,
            batch_gt_instances=batch_gt_instances,
            batch_gt_instances_ignore=batch_gt_instances_ignore)

        # prepare grid feats: one 256x14x14 feature per positive bbox
        pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results])
        grid_feats = torch.rand((pos_bboxes.size(0), 256, 14, 14)).to(device)
        sample_idx = torch.arange(0, pos_bboxes.size(0))
        grid_pred = grid_head(grid_feats)
        grid_head.loss(grid_pred, sample_idx, sampling_results, train_cfg)

    @parameterized.expand(['cpu', 'cuda'])
    def test_mask_iou_head_predict_by_feat(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        grid_head = GridHead()
        grid_head.to(device=device)
        s = 128
        num_samples = 2
        num_classes = 4
        img_metas = {
            'img_shape': (s, s, 3),
            'scale_factor': (1, 1),
            'ori_shape': (s, s, 3)
        }
        results = InstanceData(metainfo=img_metas)
        results.bboxes = torch.rand((num_samples, 4)).to(device)
        results.scores = torch.rand((num_samples, )).to(device)
        results.labels = torch.randint(
            num_classes, (num_samples, ), dtype=torch.long).to(device)
        grid_feats = torch.rand((num_samples, 256, 14, 14)).to(device)
        grid_preds = grid_head(grid_feats)
        grid_head.predict_by_feat(
            grid_preds=grid_preds,
            results_list=[results],
            batch_img_metas=[img_metas])
| 3,034 | 34.290698 | 77 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_coarse_mask_head.py
|
import unittest

import torch
from parameterized import parameterized

from mmdet.models.roi_heads.mask_heads import CoarseMaskHead


class TestCoarseMaskHead(unittest.TestCase):
    """Tests for CoarseMaskHead argument validation and output sizes."""

    def test_init(self):
        # At least one fc layer is required.
        with self.assertRaises(AssertionError):
            CoarseMaskHead(num_fcs=0)
        # Downsample factor must be a positive integer (>= 1).
        with self.assertRaises(AssertionError):
            CoarseMaskHead(downsample_factor=0.5)

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        x = torch.rand((1, 32, 7, 7)).to(device)

        # downsample_factor=2: 7x7 input -> 3x3 output.
        mask_head = CoarseMaskHead(
            downsample_factor=2,
            in_channels=32,
            conv_out_channels=32,
            roi_feat_size=7).to(device)
        mask_head.init_weights()
        res = mask_head(x)
        self.assertEqual(res.shape[-2:], (3, 3))

        # downsample_factor=1: spatial size is preserved.
        mask_head = CoarseMaskHead(
            downsample_factor=1,
            in_channels=32,
            conv_out_channels=32,
            roi_feat_size=7).to(device)
        mask_head.init_weights()
        res = mask_head(x)
        self.assertEqual(res.shape[-2:], (7, 7))
| 1,229 | 28.285714 | 72 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_roi_extractors/test_generic_roi_extractor.py
|
import unittest

import torch

from mmdet.models.roi_heads.roi_extractors import GenericRoIExtractor


class TestGenericRoIExtractor(unittest.TestCase):
    """Tests for GenericRoIExtractor init validation and forward paths."""

    def test_init(self):
        # Unknown aggregation mode must be rejected.
        with self.assertRaises(AssertionError):
            GenericRoIExtractor(
                aggregation='other',
                roi_layer=dict(
                    type='RoIAlign', output_size=7, sampling_ratio=2),
                out_channels=16,
                featmap_strides=[4, 8, 16, 32])

        roi_extractor = GenericRoIExtractor(
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=16,
            featmap_strides=[4, 8, 16, 32])
        # No pre/post processing modules configured by default.
        self.assertFalse(roi_extractor.with_pre)
        self.assertFalse(roi_extractor.with_post)

    def test_forward(self):
        # test with pre/post processing convs
        cfg = dict(
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=16,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=16,
                out_channels=16,
                kernel_size=5,
                padding=2,
                inplace=False),
            post_cfg=dict(
                type='ConvModule',
                in_channels=16,
                out_channels=16,
                kernel_size=5,
                padding=2,
                inplace=False))
        roi_extractor = GenericRoIExtractor(**cfg)

        # empty rois
        feats = (
            torch.rand((1, 16, 200, 336)),
            torch.rand((1, 16, 100, 168)),
        )
        rois = torch.empty((0, 5), dtype=torch.float32)
        res = roi_extractor(feats, rois)
        self.assertEqual(len(res), 0)

        # single scale feature
        rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
        feats = (torch.rand((1, 16, 200, 336)), )
        res = roi_extractor(feats, rois)
        self.assertEqual(res.shape, (1, 16, 7, 7))

        # multi-scale features
        feats = (
            torch.rand((1, 16, 200, 336)),
            torch.rand((1, 16, 100, 168)),
            torch.rand((1, 16, 50, 84)),
            torch.rand((1, 16, 25, 42)),
        )
        res = roi_extractor(feats, rois)
        self.assertEqual(res.shape, (1, 16, 7, 7))

        # test w.o. pre/post concat: channels are concatenated across levels
        cfg = dict(
            aggregation='concat',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=16 * 4,
            featmap_strides=[4, 8, 16, 32])
        roi_extractor = GenericRoIExtractor(**cfg)
        res = roi_extractor(feats, rois)
        self.assertEqual(res.shape, (1, 64, 7, 7))

        # test concat channels number mismatch
        cfg = dict(
            aggregation='concat',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256 * 5,  # 256*5 != 256*4
            featmap_strides=[4, 8, 16, 32])
        roi_extractor = GenericRoIExtractor(**cfg)
        feats = (
            torch.rand((1, 256, 200, 336)),
            torch.rand((1, 256, 100, 168)),
            torch.rand((1, 256, 50, 84)),
            torch.rand((1, 256, 25, 42)),
        )
        # out_channels does not equal the sum of feat channels
        with self.assertRaises(AssertionError):
            roi_extractor(feats, rois)
| 3,384 | 32.186275 | 78 |
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_roi_extractors/test_single_level_roi_extractor.py
|
import unittest
import torch
from mmdet.models.roi_heads.roi_extractors import SingleRoIExtractor
class TestSingleRoIExtractor(unittest.TestCase):
    """Unit tests for ``SingleRoIExtractor``."""

    def test_forward(self):
        """Forward with empty, single-scale and multi-scale inputs."""
        extractor = SingleRoIExtractor(
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=16,
            featmap_strides=[4, 8, 16, 32])

        single_feat = (torch.rand((1, 16, 200, 336)), )

        # No proposals -> an empty output is expected.
        empty_rois = torch.empty((0, 5), dtype=torch.float32)
        out = extractor(single_feat, empty_rois)
        self.assertEqual(len(out), 0)

        # One proposal pooled from a single feature level.
        one_roi = torch.tensor(
            [[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
        out = extractor(single_feat, one_roi)
        self.assertEqual(out.shape, (1, 16, 7, 7))

        # One proposal pooled from a four-level feature pyramid.
        pyramid = tuple(
            torch.rand((1, 16, 200 // 2**i, 336 // 2**i)) for i in range(4))
        out = extractor(pyramid, one_roi)
        self.assertEqual(out.shape, (1, 16, 7, 7))

        # Rescaling the RoIs must not change the pooled output size.
        out = extractor(pyramid, one_roi, roi_scale_factor=2.0)
        self.assertEqual(out.shape, (1, 16, 7, 7))
| 1,246 | 30.175 | 78 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_objects365.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import Objects365V1Dataset, Objects365V2Dataset
class TestObjects365V1Dataset(unittest.TestCase):
    """Unit tests for ``Objects365V1Dataset``."""

    @staticmethod
    def _build(ann_file):
        """Construct an eagerly-initialised dataset over *ann_file*."""
        return Objects365V1Dataset(
            data_prefix=dict(img='imgs'),
            ann_file=ann_file,
            metainfo=dict(classes=('bus', 'car'), task_name='new_task'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[],
            serialize_data=False,
            lazy_init=False)

    def test_obj365v1_dataset(self):
        dataset = self._build('tests/data/coco_sample.json')
        # Custom metainfo overrides the dataset defaults.
        self.assertEqual(dataset.metainfo['classes'], ('bus', 'car'))
        self.assertEqual(dataset.metainfo['task_name'], 'new_task')
        self.assertListEqual(dataset.get_cat_ids(0), [0, 1])
        self.assertEqual(dataset.cat_ids, [1, 2])

    def test_obj365v1_with_unsorted_annotation(self):
        # The loader must sort unsorted annotation files internally, so
        # the results match the sorted-annotation case above.
        dataset = self._build(
            'tests/data/Objects365/unsorted_obj365_sample.json')
        self.assertEqual(dataset.metainfo['classes'], ('bus', 'car'))
        self.assertEqual(dataset.metainfo['task_name'], 'new_task')
        self.assertListEqual(dataset.get_cat_ids(0), [0, 1])
        self.assertEqual(dataset.cat_ids, [1, 2])

    def test_obj365v1_annotation_ids_unique(self):
        # Duplicate annotation ids must trigger an assertion.
        with self.assertRaisesRegex(AssertionError, 'are not unique!'):
            Objects365V1Dataset(
                data_prefix=dict(img='imgs'),
                ann_file='tests/data/coco_wrong_format_sample.json',
                metainfo=dict(classes=('car', ), task_name='new_task'),
                pipeline=[])
class TestObjects365V2Dataset(unittest.TestCase):
    """Unit tests for ``Objects365V2Dataset``."""

    def test_obj365v2_dataset(self):
        custom_meta = dict(classes=('bus', 'car'), task_name='new_task')
        dataset = Objects365V2Dataset(
            data_prefix=dict(img='imgs'),
            ann_file='tests/data/coco_sample.json',
            metainfo=custom_meta,
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[],
            serialize_data=False,
            lazy_init=False)
        # Custom metainfo overrides the dataset defaults.
        self.assertEqual(dataset.metainfo['classes'], ('bus', 'car'))
        self.assertEqual(dataset.metainfo['task_name'], 'new_task')
        self.assertListEqual(dataset.get_cat_ids(0), [0, 1])
        self.assertEqual(dataset.cat_ids, [1, 2])

    def test_obj365v1_annotation_ids_unique(self):
        # Duplicate annotation ids must trigger an assertion.
        with self.assertRaisesRegex(AssertionError, 'are not unique!'):
            Objects365V2Dataset(
                data_prefix=dict(img='imgs'),
                ann_file='tests/data/coco_wrong_format_sample.json',
                metainfo=dict(classes=('car', ), task_name='new_task'),
                pipeline=[])
| 3,362 | 41.0375 | 73 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_crowdhuman.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CrowdHumanDataset
class TestCrowdHumanDataset(unittest.TestCase):
    """Unit tests for ``CrowdHumanDataset``."""

    def test_crowdhuman_init(self):
        """The sample odgt file yields one image with a 'person' class."""
        ds = CrowdHumanDataset(
            data_root='tests/data/crowdhuman_dataset/',
            ann_file='test_annotation_train.odgt',
            data_prefix=dict(img='Images/'),
            pipeline=[])
        self.assertEqual(len(ds), 1)
        self.assertEqual(ds.metainfo['classes'], ('person', ))
| 521 | 29.705882 | 67 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_coco.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CocoDataset
class TestCocoDataset(unittest.TestCase):
    """Unit tests for ``CocoDataset``."""

    def test_coco_dataset(self):
        # Custom metainfo must override the dataset defaults.
        custom_meta = dict(classes=('bus', 'car'), task_name='new_task')
        dataset = CocoDataset(
            data_prefix=dict(img='imgs'),
            ann_file='tests/data/coco_sample.json',
            metainfo=custom_meta,
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[],
            serialize_data=False,
            lazy_init=False)
        self.assertEqual(dataset.metainfo['classes'], ('bus', 'car'))
        self.assertEqual(dataset.metainfo['task_name'], 'new_task')
        self.assertListEqual(dataset.get_cat_ids(0), [0, 1])

    def test_coco_dataset_without_filter_cfg(self):
        # Without a filter_cfg nothing is filtered out, in either mode.
        for test_mode in (False, True):
            dataset = CocoDataset(
                data_prefix=dict(img='imgs'),
                ann_file='tests/data/coco_sample.json',
                test_mode=test_mode,
                pipeline=[])
            self.assertEqual(len(dataset), 4)

    def test_coco_annotation_ids_unique(self):
        # Duplicate annotation ids must trigger an assertion.
        with self.assertRaisesRegex(AssertionError, 'are not unique!'):
            CocoDataset(
                data_prefix=dict(img='imgs'),
                ann_file='tests/data/coco_wrong_format_sample.json',
                metainfo=dict(classes=('car', ), task_name='new_task'),
                pipeline=[])
| 1,771 | 35.163265 | 71 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_wider_face.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import cv2
import numpy as np
from mmdet.datasets import WIDERFaceDataset
class TestWIDERFaceDataset(unittest.TestCase):
    """Unit tests for ``WIDERFaceDataset``."""

    def setUp(self) -> None:
        # Write a dummy all-black image where the annotation expects one.
        img_path = 'tests/data/WIDERFace/WIDER_train/0--Parade/0_Parade_marchingband_1_5.jpg'  # noqa: E501
        cv2.imwrite(img_path, np.zeros((683, 1024, 3), dtype=np.uint8))

    def test_wider_face_dataset(self):
        dataset = WIDERFaceDataset(
            data_root='tests/data/WIDERFace',
            ann_file='train.txt',
            data_prefix=dict(img='WIDER_train'),
            pipeline=[])
        dataset.full_init()
        self.assertEqual(len(dataset), 1)

        records = dataset.load_data_list()
        self.assertEqual(len(records), 1)
        # The single sample carries ten face instances.
        self.assertEqual(len(records[0]['instances']), 10)
| 880 | 29.37931 | 107 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_tta.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from unittest import TestCase
import mmcv
import pytest
from mmdet.datasets.transforms import * # noqa
from mmdet.registry import TRANSFORMS
class TestMuitiScaleFlipAug(TestCase):
    """Tests for ``TestTimeAug``-based multi-scale / flip TTA pipelines.

    NOTE(review): the class name misspells "Multi"; kept as-is since a
    rename is out of scope for a documentation pass.
    """

    def test_exception(self):
        """Building with a flat (non-nested) transform list must fail."""
        with pytest.raises(TypeError):
            tta_transform = dict(
                type='TestTimeAug',
                transforms=[dict(type='Resize', keep_ratio=False)],
            )
            TRANSFORMS.build(tta_transform)

    def test_multi_scale_flip_aug(self):
        """Exercise fixed scales, flip variants and relative scale factors."""
        # Case 1: three fixed scales, no flip -> three augmented inputs.
        tta_transform = dict(
            type='TestTimeAug',
            transforms=[[
                dict(type='Resize', scale=scale, keep_ratio=False)
                for scale in [(256, 256), (512, 512), (1024, 1024)]
            ],
                        [
                            dict(
                                type='mmdet.PackDetInputs',
                                meta_keys=('img_id', 'img_path', 'ori_shape',
                                           'img_shape', 'scale_factor'))
                        ]])
        tta_module = TRANSFORMS.build(tta_transform)

        results = dict()
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
        results['img_id'] = '1'
        results['img_path'] = 'data/color.jpg'
        results['img'] = img
        results['ori_shape'] = img.shape
        results['ori_height'] = img.shape[0]
        results['ori_width'] = img.shape[1]
        # Set initial values for default meta_keys
        results['pad_shape'] = img.shape
        results['scale_factor'] = 1.0

        tta_results = tta_module(results.copy())
        assert [img.shape
                for img in tta_results['inputs']] == [(3, 256, 256),
                                                      (3, 512, 512),
                                                      (3, 1024, 1024)]

        # Case 2: three scales x two flips -> six augmented inputs, with
        # alternating flip flags recorded in each sample's metainfo.
        tta_transform = dict(
            type='TestTimeAug',
            transforms=[
                [
                    dict(type='Resize', scale=scale, keep_ratio=False)
                    for scale in [(256, 256), (512, 512), (1024, 1024)]
                ],
                [
                    dict(type='RandomFlip', prob=0., direction='horizontal'),
                    dict(type='RandomFlip', prob=1., direction='horizontal')
                ],
                [
                    dict(
                        type='mmdet.PackDetInputs',
                        meta_keys=('img_id', 'img_path', 'ori_shape',
                                   'img_shape', 'scale_factor', 'flip',
                                   'flip_direction'))
                ]
            ])
        tta_module = TRANSFORMS.build(tta_transform)
        tta_results: dict = tta_module(results.copy())
        assert [img.shape
                for img in tta_results['inputs']] == [(3, 256, 256),
                                                      (3, 256, 256),
                                                      (3, 512, 512),
                                                      (3, 512, 512),
                                                      (3, 1024, 1024),
                                                      (3, 1024, 1024)]
        assert [
            data_sample.metainfo['flip']
            for data_sample in tta_results['data_samples']
        ] == [False, True, False, True, False, True]

        # Case 3: a single fixed scale -> one augmented input.
        tta_transform = dict(
            type='TestTimeAug',
            transforms=[[
                dict(type='Resize', scale=(512, 512), keep_ratio=False)
            ],
                        [
                            dict(
                                type='mmdet.PackDetInputs',
                                meta_keys=('img_id', 'img_path', 'ori_shape',
                                           'img_shape', 'scale_factor'))
                        ]])
        tta_module = TRANSFORMS.build(tta_transform)
        tta_results = tta_module(results.copy())
        assert [tta_results['inputs'][0].shape] == [(3, 512, 512)]

        # Case 4: a single scale x two flips -> two augmented inputs.
        tta_transform = dict(
            type='TestTimeAug',
            transforms=[
                [dict(type='Resize', scale=(512, 512), keep_ratio=False)],
                [
                    dict(type='RandomFlip', prob=0., direction='horizontal'),
                    dict(type='RandomFlip', prob=1., direction='horizontal')
                ],
                [
                    dict(
                        type='mmdet.PackDetInputs',
                        meta_keys=('img_id', 'img_path', 'ori_shape',
                                   'img_shape', 'scale_factor', 'flip',
                                   'flip_direction'))
                ]
            ])
        tta_module = TRANSFORMS.build(tta_transform)
        tta_results = tta_module(results.copy())
        assert [img.shape for img in tta_results['inputs']] == [(3, 512, 512),
                                                                (3, 512, 512)]
        assert [
            data_sample.metainfo['flip']
            for data_sample in tta_results['data_samples']
        ] == [False, True]

        # Case 5: relative scale factors with keep_ratio=False.  The
        # 1.0-factor output is (3, 288, 512), i.e. the source image is
        # 288x512, so factors 0.5/2.0 give 144x256 and 576x1024.
        tta_transform = dict(
            type='TestTimeAug',
            transforms=[[
                dict(type='Resize', scale_factor=r, keep_ratio=False)
                for r in [0.5, 1.0, 2.0]
            ],
                        [
                            dict(
                                type='mmdet.PackDetInputs',
                                meta_keys=('img_id', 'img_path', 'ori_shape',
                                           'img_shape', 'scale_factor'))
                        ]])
        tta_module = TRANSFORMS.build(tta_transform)
        tta_results = tta_module(results.copy())
        assert [img.shape for img in tta_results['inputs']] == [(3, 144, 256),
                                                                (3, 288, 512),
                                                                (3, 576, 1024)]

        # Case 6: relative scale factors (keep_ratio=True) x two flips.
        tta_transform = dict(
            type='TestTimeAug',
            transforms=[
                [
                    dict(type='Resize', scale_factor=r, keep_ratio=True)
                    for r in [0.5, 1.0, 2.0]
                ],
                [
                    dict(type='RandomFlip', prob=0., direction='horizontal'),
                    dict(type='RandomFlip', prob=1., direction='horizontal')
                ],
                [
                    dict(
                        type='mmdet.PackDetInputs',
                        meta_keys=('img_id', 'img_path', 'ori_shape',
                                   'img_shape', 'scale_factor', 'flip',
                                   'flip_direction'))
                ]
            ])
        tta_module = TRANSFORMS.build(tta_transform)
        tta_results = tta_module(results.copy())
        assert [img.shape for img in tta_results['inputs']] == [(3, 144, 256),
                                                                (3, 144, 256),
                                                                (3, 288, 512),
                                                                (3, 288, 512),
                                                                (3, 576, 1024),
                                                                (3, 576, 1024)]
        assert [
            data_sample.metainfo['flip']
            for data_sample in tta_results['data_samples']
        ] == [False, True, False, True, False, True]
| 7,531 | 41.314607 | 79 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_pascal_voc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import VOCDataset
class TestVOCDataset(unittest.TestCase):
    """Unit tests for ``VOCDataset`` on the VOC2007/VOC2012 sample data."""

    def test_voc2007_init(self):
        dataset = VOCDataset(
            data_root='tests/data/VOCdevkit/',
            ann_file='VOC2007/ImageSets/Main/trainval.txt',
            data_prefix=dict(sub_data_root='VOC2007/'),
            filter_cfg=dict(
                filter_empty_gt=True, min_size=32, bbox_min_size=32),
            pipeline=[])
        dataset.full_init()
        self.assertEqual(len(dataset), 1)

        records = dataset.load_data_list()
        self.assertEqual(len(records), 1)
        self.assertEqual(len(records[0]['instances']), 2)
        self.assertEqual(dataset.get_cat_ids(0), [11, 14])

    def test_voc2012_init(self):
        dataset = VOCDataset(
            data_root='tests/data/VOCdevkit/',
            ann_file='VOC2012/ImageSets/Main/trainval.txt',
            data_prefix=dict(sub_data_root='VOC2012/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[])
        dataset.full_init()
        self.assertEqual(len(dataset), 1)

        records = dataset.load_data_list()
        self.assertEqual(len(records), 1)
        self.assertEqual(len(records[0]['instances']), 1)
        self.assertEqual(dataset.get_cat_ids(0), [18])
| 1,367 | 34.076923 | 69 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_cityscapes.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import unittest
from mmengine.fileio import dump
from mmdet.datasets import CityscapesDataset
class TestCityscapesDataset(unittest.TestCase):
    """Tests for ``CityscapesDataset`` image and annotation filtering."""

    def setUp(self) -> None:
        """Dump a temporary COCO-style annotation file with four images.

        Image 0 carries one fully valid instance plus several illegal
        ones, image 1 only a crowd instance, image 2 no annotations at
        all, and image 3 is undersized (15x31) so that ``min_size``
        filtering can be exercised.
        """
        image1 = {
            'file_name': 'munster/munster_000102_000019_leftImg8bit.png',
            'height': 1024,
            'width': 2048,
            'segm_file': 'munster/munster_000102_000019_gtFine_labelIds.png',
            'id': 0
        }
        image2 = {
            'file_name': 'munster/munster_000157_000019_leftImg8bit.png',
            'height': 1024,
            'width': 2048,
            'segm_file': 'munster/munster_000157_000019_gtFine_labelIds.png',
            'id': 1
        }
        image3 = {
            'file_name': 'munster/munster_000139_000019_leftImg8bit.png',
            'height': 1024,
            'width': 2048,
            'segm_file': 'munster/munster_000139_000019_gtFine_labelIds.png',
            'id': 2
        }
        # Undersized image: both sides fall below the min_size=32 used
        # by the tests below.
        image4 = {
            'file_name': 'munster/munster_000034_000019_leftImg8bit.png',
            'height': 31,
            'width': 15,
            'segm_file': 'munster/munster_000034_000019_gtFine_labelIds.png',
            'id': 3
        }
        images = [image1, image2, image3, image4]
        # Cityscapes-style category ids (24/25/26).
        categories = [{
            'id': 24,
            'name': 'person'
        }, {
            'id': 25,
            'name': 'rider'
        }, {
            'id': 26,
            'name': 'car'
        }]
        annotations = [
            # id 0: the only fully valid instance (image 0).
            {
                'iscrowd': 0,
                'category_id': 24,
                'bbox': [379.0, 435.0, 52.0, 124.0],
                'area': 2595,
                'segmentation': {
                    'size': [1024, 2048],
                    'counts': 'xxx'
                },
                'image_id': 0,
                'id': 0
            },
            # id 1: illegal, negative area.
            {
                'iscrowd': 0,
                'category_id': 25,
                'bbox': [379.0, 435.0, 52.0, 124.0],
                'area': -1,
                'segmentation': {
                    'size': [1024, 2048],
                    'counts': 'xxx'
                },
                'image_id': 0,
                'id': 1
            },
            # id 2: illegal, negative bbox width.
            {
                'iscrowd': 0,
                'category_id': 26,
                'bbox': [379.0, 435.0, -1, 124.0],
                'area': 2,
                'segmentation': {
                    'size': [1024, 2048],
                    'counts': 'xxx'
                },
                'image_id': 0,
                'id': 2
            },
            # id 3: illegal, negative bbox height.
            {
                'iscrowd': 0,
                'category_id': 24,
                'bbox': [379.0, 435.0, 52.0, -1],
                'area': 2,
                'segmentation': {
                    'size': [1024, 2048],
                    'counts': 'xxx'
                },
                'image_id': 0,
                'id': 3
            },
            # id 4: category_id 1 is not among the declared categories.
            {
                'iscrowd': 0,
                'category_id': 1,
                'bbox': [379.0, 435.0, 52.0, 124.0],
                'area': 2595,
                'segmentation': {
                    'size': [1024, 2048],
                    'counts': 'xxx'
                },
                'image_id': 0,
                'id': 4
            },
            # id 5: crowd instance, the only annotation on image 1.
            {
                'iscrowd': 1,
                'category_id': 26,
                'bbox': [379.0, 435.0, 52.0, 124.0],
                'area': 2595,
                'segmentation': {
                    'size': [1024, 2048],
                    'counts': 'xxx'
                },
                'image_id': 1,
                'id': 5
            },
            # id 6: valid box, but on the undersized image 3.
            {
                'iscrowd': 0,
                'category_id': 26,
                'bbox': [379.0, 435.0, 10, 2],
                'area': 2595,
                'segmentation': {
                    'size': [1024, 2048],
                    'counts': 'xxx'
                },
                'image_id': 3,
                'id': 6
            },
        ]
        fake_json = {
            'images': images,
            'annotations': annotations,
            'categories': categories
        }
        self.json_name = 'cityscapes.json'
        dump(fake_json, self.json_name)

        self.metainfo = dict(classes=('person', 'rider', 'car'))

    def tearDown(self):
        """Delete the temporary annotation file."""
        os.remove(self.json_name)

    def test_cityscapes_dataset(self):
        """With filtering, train mode keeps one image; test mode all four."""
        dataset = CityscapesDataset(
            ann_file=self.json_name,
            data_prefix=dict(img='imgs'),
            metainfo=self.metainfo,
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        dataset.full_init()

        # filter images of small size and images
        # with all illegal annotations
        self.assertEqual(len(dataset), 1)
        self.assertEqual(len(dataset.load_data_list()), 4)

        dataset = CityscapesDataset(
            ann_file=self.json_name,
            data_prefix=dict(img='imgs'),
            metainfo=self.metainfo,
            test_mode=True,
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[])
        dataset.full_init()
        # test_mode=True skips filtering entirely.
        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)

    def test_cityscapes_dataset_without_filter_cfg(self):
        """Without a filter_cfg nothing is filtered in either mode."""
        dataset = CityscapesDataset(
            ann_file=self.json_name,
            data_prefix=dict(img='imgs'),
            metainfo=self.metainfo,
            filter_cfg=None,
            pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        dataset.full_init()

        # All four images survive with filtering disabled.
        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)

        dataset = CityscapesDataset(
            ann_file=self.json_name,
            data_prefix=dict(img='imgs'),
            metainfo=self.metainfo,
            test_mode=True,
            filter_cfg=None,
            pipeline=[])
        dataset.full_init()

        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)
| 6,498 | 30.396135 | 79 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_coco_panoptic.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import unittest
from mmengine.fileio import dump
from mmdet.datasets import CocoPanopticDataset
class TestCocoPanopticDataset(unittest.TestCase):
    """Tests for ``CocoPanopticDataset`` loading and filtering."""

    def setUp(self):
        """Dump a temporary panoptic-style annotation file with four images.

        Image 2 is undersized (31x40); image 3 carries only a crowd
        segment; image 1 mixes valid, crowd and illegal (negative
        extent) segments.
        """
        image1 = {
            'id': 0,
            'width': 640,
            'height': 640,
            'file_name': 'fake_name1.jpg',
        }
        image2 = {
            'id': 1,
            'width': 640,
            'height': 800,
            'file_name': 'fake_name2.jpg',
        }
        # Undersized image, removed by min_size=32 filtering below.
        image3 = {
            'id': 2,
            'width': 31,
            'height': 40,
            'file_name': 'fake_name3.jpg',
        }
        image4 = {
            'id': 3,
            'width': 400,
            'height': 400,
            'file_name': 'fake_name4.jpg',
        }
        images = [image1, image2, image3, image4]
        annotations = [
            # Image 0: three valid segments (two things, one stuff).
            {
                'segments_info': [{
                    'id': 1,
                    'category_id': 0,
                    'area': 400,
                    'bbox': [50, 60, 20, 20],
                    'iscrowd': 0
                }, {
                    'id': 2,
                    'category_id': 1,
                    'area': 900,
                    'bbox': [100, 120, 30, 30],
                    'iscrowd': 0
                }, {
                    'id': 3,
                    'category_id': 2,
                    'iscrowd': 0,
                    'bbox': [1, 189, 612, 285],
                    'area': 70036
                }],
                'file_name':
                'fake_name1.jpg',
                'image_id':
                0
            },
            {
                'segments_info': [
                    {
                        # Different to instance style json, there
                        # are duplicate ids in panoptic style json
                        'id': 1,
                        'category_id': 0,
                        'area': 400,
                        'bbox': [50, 60, 20, 20],
                        'iscrowd': 0
                    },
                    {
                        # Crowd segment.
                        'id': 4,
                        'category_id': 1,
                        'area': 900,
                        'bbox': [100, 120, 30, 30],
                        'iscrowd': 1
                    },
                    {
                        'id': 5,
                        'category_id': 2,
                        'iscrowd': 0,
                        'bbox': [100, 200, 200, 300],
                        'area': 66666
                    },
                    {
                        # Illegal: negative bbox width and area.
                        'id': 6,
                        'category_id': 0,
                        'iscrowd': 0,
                        'bbox': [1, 189, -10, 285],
                        'area': -2
                    },
                    {
                        # Illegal: negative bbox height.
                        'id': 10,
                        'category_id': 0,
                        'iscrowd': 0,
                        'bbox': [1, 189, 10, -285],
                        'area': 100
                    }
                ],
                'file_name':
                'fake_name2.jpg',
                'image_id':
                1
            },
            # Image 2 (undersized): one small valid segment.
            {
                'segments_info': [{
                    'id': 7,
                    'category_id': 0,
                    'area': 25,
                    'bbox': [0, 0, 5, 5],
                    'iscrowd': 0
                }],
                'file_name':
                'fake_name3.jpg',
                'image_id':
                2
            },
            # Image 3: only a crowd segment.
            {
                'segments_info': [{
                    'id': 8,
                    'category_id': 0,
                    'area': 25,
                    'bbox': [0, 0, 400, 400],
                    'iscrowd': 1
                }],
                'file_name':
                'fake_name4.jpg',
                'image_id':
                3
            }
        ]
        # Two thing classes and one stuff class.
        categories = [{
            'id': 0,
            'name': 'car',
            'supercategory': 'car',
            'isthing': 1
        }, {
            'id': 1,
            'name': 'person',
            'supercategory': 'person',
            'isthing': 1
        }, {
            'id': 2,
            'name': 'wall',
            'supercategory': 'wall',
            'isthing': 0
        }]
        fake_json = {
            'images': images,
            'annotations': annotations,
            'categories': categories
        }
        self.json_name = 'coco_panoptic.json'
        dump(fake_json, self.json_name)

        self.metainfo = dict(
            classes=('person', 'car', 'wall'),
            thing_classes=('person', 'car'),
            stuff_classes=('wall', ))

    def tearDown(self):
        """Delete the temporary annotation file."""
        os.remove(self.json_name)

    def test_coco_panoptic_dataset(self):
        """With filter_cfg, train mode keeps 2 of 4 images; test mode all."""
        dataset = CocoPanopticDataset(
            data_root='./',
            ann_file=self.json_name,
            data_prefix=dict(img='imgs', seg='seg'),
            metainfo=self.metainfo,
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        self.assertEqual(dataset.metainfo['thing_classes'],
                         self.metainfo['thing_classes'])
        self.assertEqual(dataset.metainfo['stuff_classes'],
                         self.metainfo['stuff_classes'])
        dataset.full_init()

        # filter images of small size and images
        # with all illegal annotations
        self.assertEqual(len(dataset), 2)
        self.assertEqual(len(dataset.load_data_list()), 4)

        # test mode
        dataset = CocoPanopticDataset(
            data_root='./',
            ann_file=self.json_name,
            data_prefix=dict(img='imgs', seg='seg'),
            metainfo=self.metainfo,
            test_mode=True,
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        self.assertEqual(dataset.metainfo['thing_classes'],
                         self.metainfo['thing_classes'])
        self.assertEqual(dataset.metainfo['stuff_classes'],
                         self.metainfo['stuff_classes'])
        dataset.full_init()

        # test_mode=True skips filtering entirely.
        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)

    def test_coco_panoptic_dataset_without_filter_cfg(self):
        """Without a filter_cfg nothing is filtered in either mode."""
        dataset = CocoPanopticDataset(
            data_root='./',
            ann_file=self.json_name,
            data_prefix=dict(img='imgs', seg='seg'),
            metainfo=self.metainfo,
            filter_cfg=None,
            pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        self.assertEqual(dataset.metainfo['thing_classes'],
                         self.metainfo['thing_classes'])
        self.assertEqual(dataset.metainfo['stuff_classes'],
                         self.metainfo['stuff_classes'])
        dataset.full_init()

        # All four images survive with filtering disabled.
        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)

        # test mode
        dataset = CocoPanopticDataset(
            data_root='./',
            ann_file=self.json_name,
            data_prefix=dict(img='imgs', seg='seg'),
            metainfo=self.metainfo,
            filter_cfg=None,
            test_mode=True,
            pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        self.assertEqual(dataset.metainfo['thing_classes'],
                         self.metainfo['thing_classes'])
        self.assertEqual(dataset.metainfo['stuff_classes'],
                         self.metainfo['stuff_classes'])
        dataset.full_init()

        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)
| 8,201 | 31.808 | 79 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_lvis.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import unittest
from mmengine.fileio import dump
from mmdet.datasets import LVISV1Dataset, LVISV05Dataset
try:
import lvis
except ImportError:
lvis = None
class TestLVISDataset(unittest.TestCase):
    """Tests for ``LVISV05Dataset`` / ``LVISV1Dataset`` loading/filtering.

    All tests are skipped when the optional ``lvis`` package is absent.
    """

    def setUp(self) -> None:
        """Dump a temporary LVIS-style annotation file with four images.

        Image 0 mixes valid and illegal annotations, image 2 has none,
        and image 3 is undersized (15x31) for ``min_size`` filtering.
        """
        image1 = {
            # ``coco_url`` for v1 only.
            'coco_url': 'http://images.cocodataset.org/train2017/0.jpg',
            # ``file_name`` for v0.5 only.
            'file_name': '0.jpg',
            'height': 1024,
            'width': 2048,
            'neg_category_ids': [],
            'not_exhaustive_category_ids': [],
            'id': 0
        }
        image2 = {
            'coco_url': 'http://images.cocodataset.org/train2017/1.jpg',
            'file_name': '1.jpg',
            'height': 1024,
            'width': 2048,
            'neg_category_ids': [],
            'not_exhaustive_category_ids': [],
            'id': 1
        }
        image3 = {
            'coco_url': 'http://images.cocodataset.org/train2017/2.jpg',
            'file_name': '2.jpg',
            'height': 1024,
            'width': 2048,
            'neg_category_ids': [],
            'not_exhaustive_category_ids': [],
            'id': 2
        }
        # Undersized image, removed by min_size=32 filtering below.
        image4 = {
            'coco_url': 'http://images.cocodataset.org/train2017/3.jpg',
            'file_name': '3.jpg',
            'height': 31,
            'width': 15,
            'neg_category_ids': [],
            'not_exhaustive_category_ids': [],
            'id': 3
        }
        images = [image1, image2, image3, image4]
        categories = [{
            'id': 1,
            'name': 'aerosol_can',
            'frequency': 'c',
            'image_count': 64
        }, {
            'id': 2,
            'name': 'air_conditioner',
            'frequency': 'f',
            'image_count': 364
        }, {
            'id': 3,
            'name': 'airplane',
            'frequency': 'f',
            'image_count': 1911
        }]
        annotations = [
            # id 0: valid instance (image 0).
            {
                'category_id': 1,
                'bbox': [379.0, 435.0, 52.0, 124.0],
                'area': 2595,
                'segmentation': [[0.0, 0.0]],
                'image_id': 0,
                'id': 0
            },
            # id 1: illegal, negative area.
            {
                'category_id': 2,
                'bbox': [379.0, 435.0, 52.0, 124.0],
                'area': -1,
                'segmentation': [[0.0, 0.0]],
                'image_id': 0,
                'id': 1
            },
            # id 2: illegal, negative bbox width.
            {
                'category_id': 3,
                'bbox': [379.0, 435.0, -1, 124.0],
                'area': 2,
                'segmentation': [[0.0, 0.0]],
                'image_id': 0,
                'id': 2
            },
            # id 3: illegal, negative bbox height.
            {
                'category_id': 1,
                'bbox': [379.0, 435.0, 52.0, -1],
                'area': 2,
                'segmentation': [[0.0, 0.0]],
                'image_id': 0,
                'id': 3
            },
            # id 4: valid duplicate of id 0.
            {
                'category_id': 1,
                'bbox': [379.0, 435.0, 52.0, 124.0],
                'area': 2595,
                'segmentation': [[0.0, 0.0]],
                'image_id': 0,
                'id': 4
            },
            # id 5: valid instance (image 1).
            {
                'category_id': 3,
                'bbox': [379.0, 435.0, 52.0, 124.0],
                'area': 2595,
                'segmentation': [[0.0, 0.0]],
                'image_id': 1,
                'id': 5
            },
            # id 6: valid box, but on the undersized image 3.
            {
                'category_id': 3,
                'bbox': [379.0, 435.0, 10, 2],
                'area': 2595,
                'segmentation': [[0.0, 0.0]],
                'image_id': 3,
                'id': 6
            },
        ]
        fake_json = {
            'images': images,
            'annotations': annotations,
            'categories': categories
        }
        self.json_name = 'lvis.json'
        dump(fake_json, self.json_name)

        self.metainfo = dict(
            classes=('aerosol_can', 'air_conditioner', 'airplane'))

    def tearDown(self):
        """Delete the temporary annotation file."""
        os.remove(self.json_name)

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_lvis05_dataset(self):
        """v0.5: filtering keeps 2 of 4 images; test mode keeps all."""
        dataset = LVISV05Dataset(
            ann_file=self.json_name,
            data_prefix=dict(img='imgs'),
            metainfo=self.metainfo,
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        dataset.full_init()

        # filter images of small size and images
        # with all illegal annotations
        self.assertEqual(len(dataset), 2)
        self.assertEqual(len(dataset.load_data_list()), 4)

        dataset = LVISV05Dataset(
            ann_file=self.json_name,
            data_prefix=dict(img='imgs'),
            metainfo=self.metainfo,
            test_mode=True,
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[])
        dataset.full_init()
        # test_mode=True skips filtering entirely.
        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_lvis1_dataset(self):
        """v1: filtering keeps 2 of 4 images; test mode keeps all."""
        dataset = LVISV1Dataset(
            ann_file=self.json_name,
            data_prefix=dict(img='imgs'),
            metainfo=self.metainfo,
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        dataset.full_init()

        # filter images of small size and images
        # with all illegal annotations
        self.assertEqual(len(dataset), 2)
        self.assertEqual(len(dataset.load_data_list()), 4)

        dataset = LVISV1Dataset(
            ann_file=self.json_name,
            data_prefix=dict(img='imgs'),
            metainfo=self.metainfo,
            test_mode=True,
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=[])
        dataset.full_init()
        # test_mode=True skips filtering entirely.
        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_lvis1_dataset_without_filter_cfg(self):
        """v1: without a filter_cfg nothing is filtered in either mode."""
        dataset = LVISV1Dataset(
            ann_file=self.json_name,
            data_prefix=dict(img='imgs'),
            metainfo=self.metainfo,
            filter_cfg=None,
            pipeline=[])
        self.assertEqual(dataset.metainfo['classes'], self.metainfo['classes'])
        dataset.full_init()

        # All four images survive with filtering disabled.
        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)

        dataset = LVISV1Dataset(
            ann_file=self.json_name,
            data_prefix=dict(img='imgs'),
            metainfo=self.metainfo,
            test_mode=True,
            filter_cfg=None,
            pipeline=[])
        dataset.full_init()

        self.assertEqual(len(dataset), 4)
        self.assertEqual(len(dataset.load_data_list()), 4)
| 7,429 | 31.025862 | 79 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_coco_api_wrapper.py
|
import os.path as osp
import tempfile
import unittest
from mmengine.fileio import dump
from mmdet.datasets.api_wrappers import COCOPanoptic
class TestCOCOPanoptic(unittest.TestCase):
    """Unit tests for the ``COCOPanoptic`` API wrapper."""

    def setUp(self):
        self.tmp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.tmp_dir.cleanup()

    def test_create_index(self):
        # Index creation must tolerate a json without the COCO keys.
        annotation_file = osp.join(self.tmp_dir.name, 'createIndex.json')
        dump({'test': ['test', 'createIndex']}, annotation_file)
        COCOPanoptic(annotation_file)

    def test_load_anns(self):
        ann_json = {
            'images': [{
                'id': 0,
                'width': 80,
                'height': 60,
                'file_name': 'fake_name1.jpg',
            }],
            'annotations': [{
                'segments_info': [
                    {
                        'id': 1,
                        'category_id': 0,
                        'area': 400,
                        'bbox': [10, 10, 10, 40],
                        'iscrowd': 0
                    },
                ],
                'file_name': 'fake_name1.png',
                'image_id': 0
            }],
            'categories': [{
                'id': 0,
                'name': 'person',
                'supercategory': 'person',
                'isthing': 1
            }],
        }
        annotation_file = osp.join(self.tmp_dir.name, 'load_anns.json')
        dump(ann_json, annotation_file)

        api = COCOPanoptic(annotation_file)
        api.load_anns(1)
        # Unknown ids yield None instead of raising.
        self.assertIsNone(api.load_anns(0.1))
| 1,647 | 23.235294 | 73 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_openimages.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import OpenImagesChallengeDataset, OpenImagesDataset
class TestOpenImagesDataset(unittest.TestCase):
    """Unit tests for ``OpenImagesDataset``."""

    def test_init(self):
        init_kwargs = dict(
            data_root='tests/data/OpenImages/',
            ann_file='annotations/oidv6-train-annotations-bbox.csv',
            data_prefix=dict(img='OpenImages/train/'),
            label_file='annotations/class-descriptions-boxable.csv',
            hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
            meta_file='annotations/image-metas.pkl',
            pipeline=[])
        dataset = OpenImagesDataset(**init_kwargs)
        dataset.full_init()
        # The sample annotation holds one image of a single class.
        self.assertEqual(len(dataset), 1)
        self.assertEqual(dataset.metainfo['classes'], ['Airplane'])
class TestOpenImagesChallengeDataset(unittest.TestCase):
    """Unit tests for ``OpenImagesChallengeDataset``."""

    def test_init(self):
        init_kwargs = dict(
            data_root='tests/data/OpenImages/',
            ann_file='challenge2019/challenge-2019-train-detection-bbox.txt',
            data_prefix=dict(img='OpenImages/train/'),
            label_file='challenge2019/cls-label-description.csv',
            hierarchy_file='challenge2019/class_label_tree.np',
            meta_file='annotations/image-metas.pkl',
            pipeline=[])
        dataset = OpenImagesChallengeDataset(**init_kwargs)
        dataset.full_init()
        # The sample annotation holds one image of a single class.
        self.assertEqual(len(dataset), 1)
        self.assertEqual(dataset.metainfo['classes'], ['Airplane'])
| 1,439 | 37.918919 | 77 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_samplers/test_multi_source_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import bisect
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from torch.utils.data import ConcatDataset, Dataset
from mmdet.datasets.samplers import GroupMultiSourceSampler, MultiSourceSampler
class DummyDataset(Dataset):
    """Minimal dataset yielding random (width, height) pairs plus a tag."""

    def __init__(self, length, flag):
        self.length = length
        self.flag = flag
        # One random (width, height) pair per sample.
        self.shapes = np.random.random((length, 2))

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        return self.shapes[idx]

    def get_data_info(self, idx):
        """Return the sample's size together with the dataset's tag."""
        width, height = self.shapes[idx]
        return dict(width=width, height=height, flag=self.flag)
class DummyConcatDataset(ConcatDataset):
    """ConcatDataset that can map a global index back to its source."""

    def _get_ori_dataset_idx(self, idx):
        """Return ``(dataset_idx, sample_idx)`` for a global index."""
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return dataset_idx, sample_idx

    def get_data_info(self, idx: int):
        """Delegate ``get_data_info`` to the dataset that owns *idx*."""
        dataset_idx, sample_idx = self._get_ori_dataset_idx(idx)
        return self.datasets[dataset_idx].get_data_info(sample_idx)
class TestMultiSourceSampler(TestCase):
    """Unit tests for ``MultiSourceSampler``."""

    @patch('mmengine.dist.get_dist_info', return_value=(7, 8))
    def setUp(self, mock):
        # Two sources of very different size, concatenated.
        self.length_a = 100
        self.dataset_a = DummyDataset(self.length_a, flag='a')
        self.length_b = 1000
        self.dataset_b = DummyDataset(self.length_b, flag='b')
        self.dataset = DummyConcatDataset([self.dataset_a, self.dataset_b])

    def test_multi_source_sampler(self):
        # A plain (non-concat) dataset is rejected.
        with self.assertRaises(AssertionError):
            MultiSourceSampler(
                self.dataset_a, batch_size=5, source_ratio=[1, 4])
        # A non-positive batch size is rejected.
        with self.assertRaises(AssertionError):
            MultiSourceSampler(
                self.dataset_a, batch_size=-5, source_ratio=[1, 4])
        # The ratio list must match the number of sources.
        with self.assertRaises(AssertionError):
            MultiSourceSampler(
                self.dataset, batch_size=5, source_ratio=[1, 2, 4])

        sampler = iter(
            MultiSourceSampler(
                self.dataset, batch_size=5, source_ratio=[1, 4]))
        # Every batch of 5 holds one 'a' sample then four 'b' samples.
        flags = [
            self.dataset.get_data_info(next(sampler))['flag']
            for _ in range(100)
        ]
        self.assertEqual(flags, ['a', 'b', 'b', 'b', 'b'] * 20)
class TestGroupMultiSourceSampler(TestCase):
    """Unit tests for ``GroupMultiSourceSampler``."""

    @patch('mmengine.dist.get_dist_info', return_value=(7, 8))
    def setUp(self, mock):
        # Two sources of very different size, concatenated.
        self.length_a = 100
        self.dataset_a = DummyDataset(self.length_a, flag='a')
        self.length_b = 1000
        self.dataset_b = DummyDataset(self.length_b, flag='b')
        self.dataset = DummyConcatDataset([self.dataset_a, self.dataset_b])

    def test_group_multi_source_sampler(self):
        sampler = iter(
            GroupMultiSourceSampler(
                self.dataset, batch_size=5, source_ratio=[1, 4]))
        flags = []
        groups = []
        for _ in range(100):
            info = self.dataset.get_data_info(next(sampler))
            flags.append(info['flag'])
            # Group 0: portrait (width < height); group 1: landscape.
            groups.append(0 if info['width'] < info['height'] else 1)
        self.assertEqual(flags, ['a', 'b', 'b', 'b', 'b'] * 20)
        # Each batch of 5 must be homogeneous in aspect-ratio group, so
        # the per-batch group sums can only be 0 or 5.
        batch_sums = set(sum(groups[k:k + 5]) for k in range(0, 100, 5))
        self.assertEqual(batch_sums, set([0, 5]))
| 3,732 | 33.564815 | 79 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_samplers/test_batch_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from mmengine.dataset import DefaultSampler
from torch.utils.data import Dataset
from mmdet.datasets.samplers import AspectRatioBatchSampler
class DummyDataset(Dataset):
    """Minimal map-style dataset exposing random (width, height) shapes."""

    def __init__(self, length):
        self.length = length
        self.shapes = np.random.random((length, 2))

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        return self.shapes[idx]

    def get_data_info(self, idx):
        """Return the metadata dict consumed by the batch sampler."""
        width, height = self.shapes[idx]
        return dict(width=width, height=height)
class TestAspectRatioBatchSampler(TestCase):
    """Unit tests for AspectRatioBatchSampler."""

    @patch('mmengine.dist.get_dist_info', return_value=(0, 1))
    def setUp(self, mock):
        # A dummy dataset with random aspect ratios wrapped in a plain
        # sequential sampler (shuffle=False keeps batches deterministic).
        self.length = 100
        self.dataset = DummyDataset(self.length)
        self.sampler = DefaultSampler(self.dataset, shuffle=False)

    def test_invalid_inputs(self):
        # batch_size must be a positive integer.
        with self.assertRaisesRegex(
                ValueError, 'batch_size should be a positive integer value'):
            AspectRatioBatchSampler(self.sampler, batch_size=-1)
        # The wrapped object must be a Sampler instance.
        with self.assertRaisesRegex(
                TypeError, 'sampler should be an instance of ``Sampler``'):
            AspectRatioBatchSampler(None, batch_size=1)

    def test_divisible_batch(self):
        # 100 samples with batch_size 5: every batch is full.
        batch_size = 5
        batch_sampler = AspectRatioBatchSampler(
            self.sampler, batch_size=batch_size, drop_last=True)
        self.assertEqual(len(batch_sampler), self.length // batch_size)
        for batch_idxs in batch_sampler:
            self.assertEqual(len(batch_idxs), batch_size)
            batch = [self.dataset[idx] for idx in batch_idxs]
            # All samples in one batch must share the same orientation
            # (width < height or not).
            flag = batch[0][0] < batch[0][1]
            for i in range(1, batch_size):
                self.assertEqual(batch[i][0] < batch[i][1], flag)

    def test_indivisible_batch(self):
        batch_size = 7
        # drop_last=False: the remainder forms a final smaller batch,
        # so the number of batches rounds up.
        batch_sampler = AspectRatioBatchSampler(
            self.sampler, batch_size=batch_size, drop_last=False)
        all_batch_idxs = list(batch_sampler)
        self.assertEqual(
            len(batch_sampler), (self.length + batch_size - 1) // batch_size)
        self.assertEqual(
            len(all_batch_idxs), (self.length + batch_size - 1) // batch_size)
        # drop_last=True: the remainder is discarded.
        batch_sampler = AspectRatioBatchSampler(
            self.sampler, batch_size=batch_size, drop_last=True)
        all_batch_idxs = list(batch_sampler)
        self.assertEqual(len(batch_sampler), self.length // batch_size)
        self.assertEqual(len(all_batch_idxs), self.length // batch_size)
        # the last batch may not have the same aspect ratio
        for batch_idxs in all_batch_idxs[:-1]:
            self.assertEqual(len(batch_idxs), batch_size)
            batch = [self.dataset[idx] for idx in batch_idxs]
            flag = batch[0][0] < batch[0][1]
            for i in range(1, batch_size):
                self.assertEqual(batch[i][0] < batch[i][1], flag)
| 2,989 | 35.91358 | 78 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_transforms/test_instaboost.py
|
import os.path as osp
import unittest
import numpy as np
from mmdet.registry import TRANSFORMS
from mmdet.utils import register_all_modules
register_all_modules()
class TestInstaboost(unittest.TestCase):
    """Tests for the InstaBoost augmentation wrapper."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        img_path = osp.join(osp.dirname(__file__), '../../data/gray.jpg')
        # (bbox, label, polygon mask, ignore_flag) per toy instance.
        instance_specs = [
            ([0, 0, 10, 20], 1, [[0, 0, 0, 20, 10, 20, 10, 0]], 0),
            ([10, 10, 110, 120], 2, [[10, 10, 110, 10, 110, 120, 110,
                                      10]], 0),
            ([50, 50, 60, 80], 2, [[50, 50, 60, 50, 60, 80, 50, 80]], 1),
        ]
        self.results = {
            'img_path': img_path,
            'img_shape': (300, 400),
            'instances': [{
                'bbox': bbox,
                'bbox_label': label,
                'mask': mask,
                'ignore_flag': ignore
            } for bbox, label, mask, ignore in instance_specs]
        }

    def test_transform(self):
        load = TRANSFORMS.build(dict(type='LoadImageFromFile'))
        instaboost_transform = TRANSFORMS.build(dict(type='InstaBoost'))
        # Run image loading first, then the InstaBoost augmentation.
        results = instaboost_transform(load(self.results))
        self.assertEqual(results['img'].dtype, np.uint8)
        self.assertIn('instances', results)

    def test_repr(self):
        instaboost_transform = TRANSFORMS.build(dict(type='InstaBoost'))
        self.assertEqual(
            repr(instaboost_transform), 'InstaBoost(aug_ratio=0.5)')
| 1,782 | 29.220339 | 77 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_transforms/test_formatting.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
    """Tests for PackDetInputs with and without ignored instances."""

    def _make_results(self, rng, img_path, with_ignore):
        """Build a raw results dict as produced by the loading pipeline."""
        results = {
            'img_id': 1,
            'img_path': img_path,
            'ori_shape': (300, 400),
            'img_shape': (600, 800),
            'scale_factor': 2.0,
            'flip': False,
            'img': rng.rand(300, 400),
            'gt_seg_map': rng.rand(300, 400),
            'gt_masks':
            BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
            'gt_bboxes_labels': rng.rand(3, ),
        }
        if with_ignore:
            results['gt_ignore_flags'] = np.array([0, 0, 1], dtype=bool)
        results['proposals'] = rng.rand(2, 4)
        results['proposals_scores'] = rng.rand(2, )
        return results

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        data_prefix = osp.join(osp.dirname(__file__), '../../data')
        img_path = osp.join(data_prefix, 'color.jpg')
        rng = np.random.RandomState(0)
        # results1 carries gt_ignore_flags ([0, 0, 1]); results2 does not.
        self.results1 = self._make_results(rng, img_path, with_ignore=True)
        self.results2 = self._make_results(rng, img_path, with_ignore=False)
        self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
                          'flip')

    def _check_packed(self, raw_results, num_gt, num_ignored):
        """Pack ``raw_results`` and verify the resulting DetDataSample."""
        transform = PackDetInputs(meta_keys=self.meta_keys)
        results = transform(copy.deepcopy(raw_results))
        self.assertIn('data_samples', results)
        sample = results['data_samples']
        self.assertIsInstance(sample, DetDataSample)
        self.assertIsInstance(sample.gt_instances, InstanceData)
        self.assertIsInstance(sample.ignored_instances, InstanceData)
        self.assertEqual(len(sample.gt_instances), num_gt)
        self.assertEqual(len(sample.ignored_instances), num_ignored)
        self.assertIsInstance(sample.gt_sem_seg, PixelData)
        self.assertIsInstance(sample.proposals, InstanceData)
        self.assertEqual(len(sample.proposals), 2)
        self.assertIsInstance(sample.proposals.bboxes, torch.Tensor)
        self.assertIsInstance(sample.proposals.scores, torch.Tensor)

    def test_transform(self):
        # One of the three instances is flagged as ignored.
        self._check_packed(self.results1, num_gt=2, num_ignored=1)

    def test_transform_without_ignore(self):
        # Without ignore flags, all three instances are kept as gt.
        self._check_packed(self.results2, num_gt=3, num_ignored=0)

    def test_repr(self):
        transform = PackDetInputs(meta_keys=self.meta_keys)
        self.assertEqual(
            repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
| 4,415 | 42.294118 | 78 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_transforms/test_wrappers.py
|
import copy
import os.path as osp
import unittest
from mmcv.transforms import Compose
from mmdet.datasets.transforms import MultiBranch, RandomOrder
from mmdet.utils import register_all_modules
from .utils import construct_toy_data
register_all_modules()
class TestMultiBranch(unittest.TestCase):
    """Tests for MultiBranch producing labeled/unlabeled pipeline branches."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        data_prefix = osp.join(osp.dirname(__file__), '../../data')
        img_path = osp.join(data_prefix, 'color.jpg')
        seg_map = osp.join(data_prefix, 'gray.jpg')
        self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'img_shape',
                          'scale_factor', 'flip', 'flip_direction',
                          'homography_matrix')
        # Toy sample with three instances; the third is flagged ignored.
        self.results = {
            'img_path':
            img_path,
            'img_id':
            12345,
            'img_shape': (300, 400),
            'seg_map_path':
            seg_map,
            'instances': [{
                'bbox': [0, 0, 10, 20],
                'bbox_label': 1,
                'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],
                'ignore_flag': 0
            }, {
                'bbox': [10, 10, 110, 120],
                'bbox_label': 2,
                'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],
                'ignore_flag': 0
            }, {
                'bbox': [50, 50, 60, 80],
                'bbox_label': 2,
                'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],
                'ignore_flag': 1
            }]
        }
        self.branch_field = ['sup', 'sup_teacher', 'sup_student']
        # Weak augmentation branch: single shear + packing.
        self.weak_pipeline = [
            dict(type='ShearX'),
            dict(type='PackDetInputs', meta_keys=self.meta_keys)
        ]
        # Strong augmentation branch: two shears + packing.
        self.strong_pipeline = [
            dict(type='ShearX'),
            dict(type='ShearY'),
            dict(type='PackDetInputs', meta_keys=self.meta_keys)
        ]
        # Labeled data loads annotations before branching.
        self.labeled_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(
                type='LoadAnnotations',
                with_bbox=True,
                with_mask=True,
                with_seg=True),
            dict(type='Resize', scale=(1333, 800), keep_ratio=True),
            dict(type='RandomFlip', prob=0.5),
            dict(
                type='MultiBranch',
                branch_field=self.branch_field,
                sup_teacher=self.weak_pipeline,
                sup_student=self.strong_pipeline),
        ]
        # Unlabeled data skips annotation loading entirely.
        self.unlabeled_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='Resize', scale=(1333, 800), keep_ratio=True),
            dict(type='RandomFlip', prob=0.5),
            dict(
                type='MultiBranch',
                branch_field=self.branch_field,
                unsup_teacher=self.weak_pipeline,
                unsup_student=self.strong_pipeline),
        ]

    def test_transform(self):
        labeled_pipeline = Compose(self.labeled_pipeline)
        labeled_results = labeled_pipeline(copy.deepcopy(self.results))
        unlabeled_pipeline = Compose(self.unlabeled_pipeline)
        unlabeled_results = unlabeled_pipeline(copy.deepcopy(self.results))
        # test branch sup_teacher and sup_student
        # Labeled branches must carry full annotations.
        sup_branches = ['sup_teacher', 'sup_student']
        for branch in sup_branches:
            self.assertIn(branch, labeled_results['data_samples'])
            self.assertIn('homography_matrix',
                          labeled_results['data_samples'][branch])
            self.assertIn('labels',
                          labeled_results['data_samples'][branch].gt_instances)
            self.assertIn('bboxes',
                          labeled_results['data_samples'][branch].gt_instances)
            self.assertIn('masks',
                          labeled_results['data_samples'][branch].gt_instances)
            self.assertIn('gt_sem_seg',
                          labeled_results['data_samples'][branch])
        # test branch unsup_teacher and unsup_student
        # Unlabeled branches must NOT carry any annotation fields.
        unsup_branches = ['unsup_teacher', 'unsup_student']
        for branch in unsup_branches:
            self.assertIn(branch, unlabeled_results['data_samples'])
            self.assertIn('homography_matrix',
                          unlabeled_results['data_samples'][branch])
            self.assertNotIn(
                'labels',
                unlabeled_results['data_samples'][branch].gt_instances)
            self.assertNotIn(
                'bboxes',
                unlabeled_results['data_samples'][branch].gt_instances)
            self.assertNotIn(
                'masks',
                unlabeled_results['data_samples'][branch].gt_instances)
            self.assertNotIn('gt_sem_seg',
                             unlabeled_results['data_samples'][branch])

    def test_repr(self):
        pipeline = [dict(type='PackDetInputs', meta_keys=())]
        transform = MultiBranch(
            branch_field=self.branch_field, sup=pipeline, unsup=pipeline)
        self.assertEqual(
            repr(transform),
            ("MultiBranch(branch_pipelines=['sup', 'unsup'])"))
class TestRandomOrder(unittest.TestCase):
    """Tests for RandomOrder applying its sub-transforms in shuffled order."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.results = construct_toy_data(poly2mask=True)
        self.pipeline = [
            dict(type=name) for name in
            ('Sharpness', 'Contrast', 'Brightness', 'Rotate', 'ShearX',
             'TranslateY')
        ]

    def test_transform(self):
        transform = RandomOrder(self.pipeline)
        results = transform(copy.deepcopy(self.results))
        # Whatever order the transforms run in, the annotation structure
        # (shapes, labels, flags) must be preserved.
        self.assertEqual(results['img_shape'], self.results['img_shape'])
        for key in ('gt_bboxes_labels', 'gt_ignore_flags'):
            self.assertEqual(results[key], self.results[key])
        self.assertEqual(results['gt_bboxes'].shape,
                         self.results['gt_bboxes'].shape)
        self.assertEqual(results['gt_masks'].masks.shape,
                         self.results['gt_masks'].masks.shape)
        self.assertEqual(results['gt_seg_map'].shape,
                         self.results['gt_seg_map'].shape)

    def test_repr(self):
        transform = RandomOrder(self.pipeline)
        self.assertEqual(
            repr(transform), ('RandomOrder(Sharpness, Contrast, '
                              'Brightness, Rotate, ShearX, TranslateY, )'))
| 6,834 | 38.281609 | 79 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_transforms/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmengine.testing import assert_allclose
from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks
def create_random_bboxes(num_bboxes, img_w, img_h):
    """Generate ``num_bboxes`` random, always-valid xyxy boxes.

    Top-left corners are drawn from [0, 0.5) and bottom-right corners from
    [0.5, 1) in normalized coordinates, so x1 < x2 and y1 < y2 always hold;
    the result is scaled to pixel coordinates and cast to float32.
    """
    top_left = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
    bottom_right = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
    normalized = np.concatenate((top_left, bottom_right), 1)
    scale = np.array([img_w, img_h, img_w, img_h])
    return (normalized * scale).astype(np.float32)
def create_full_masks(gt_bboxes, img_w, img_h):
    """Create binary masks that completely fill each ground-truth box.

    Each mask is an (img_h, img_w) uint8 map with ones inside the
    (integer-truncated) box region and zeros elsewhere; the stack is
    wrapped in a BitmapMasks container.
    """
    masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)
    for i in range(len(gt_bboxes)):
        x1, y1, x2, y2 = gt_bboxes[i]
        masks[i, int(y1):int(y2), int(x1):int(x2)] = 1
    return BitmapMasks(masks, img_h, img_w)
def construct_toy_data(poly2mask, use_box_type=False):
    """Build a tiny 3x4 detection sample with one annotated instance.

    Args:
        poly2mask (bool): If True, the instance mask is stored as a
            BitmapMasks; otherwise as a PolygonMasks.
        use_box_type (bool): If True, ``gt_bboxes`` is a HorizontalBoxes
            object instead of a plain ndarray. Defaults to False.

    Returns:
        dict: Results dict containing ``img``, ``img_shape``, ``gt_bboxes``,
        ``gt_bboxes_labels``, ``gt_masks``, ``gt_ignore_flags`` and
        ``gt_seg_map``.
    """
    img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
                   dtype=np.uint8)
    # Replicate the single channel to get an HxWx3 image.
    img = np.stack([img, img, img], axis=-1)
    results = dict()
    results['img'] = img
    results['img_shape'] = img.shape[:2]
    if use_box_type:
        results['gt_bboxes'] = HorizontalBoxes(
            np.array([[1, 0, 2, 2]], dtype=np.float32))
    else:
        results['gt_bboxes'] = np.array([[1, 0, 2, 2]], dtype=np.float32)
    results['gt_bboxes_labels'] = np.array([13], dtype=np.int64)
    if poly2mask:
        gt_masks = np.array([[0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 0, 0]],
                            dtype=np.uint8)[None, :, :]
        results['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
    else:
        raw_masks = [[np.array([1, 2, 1, 0, 2, 1], dtype=np.float32)]]
        results['gt_masks'] = PolygonMasks(raw_masks, 3, 4)
    # Fix: drop the redundant double ``np.array(...)`` wrapping.
    results['gt_ignore_flags'] = np.array([1], dtype=bool)
    results['gt_seg_map'] = np.array(
        [[255, 13, 255, 255], [255, 13, 13, 255], [255, 13, 255, 255]],
        dtype=np.uint8)
    return results
def check_result_same(results, pipeline_results, check_keys):
    """Check whether ``pipeline_results`` matches the predefined ``results``.

    Args:
        results (dict): Predefined results which should be the standard
            output of the transform pipeline.
        pipeline_results (dict): Results processed by the transform
            pipeline.
        check_keys (tuple): Keys that need to be checked between
            results and pipeline_results.
    """
    for key in check_keys:
        expected = results.get(key)
        if expected is None:
            continue
        actual = pipeline_results[key]
        if isinstance(expected, (BitmapMasks, PolygonMasks)):
            # Mask containers compare via their dense ndarray form.
            assert_allclose(actual.to_ndarray(), expected.to_ndarray())
        elif isinstance(expected, BaseBoxes):
            # Box objects compare via their underlying tensors.
            assert_allclose(actual.tensor, expected.tensor)
        else:
            assert_allclose(actual, expected)
| 3,135 | 39.727273 | 78 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_transforms/test_loading.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
import os.path as osp
import sys
import unittest
from unittest.mock import MagicMock, Mock, patch
import mmcv
import numpy as np
from mmdet.datasets.transforms import (FilterAnnotations, LoadAnnotations,
LoadEmptyAnnotations,
LoadImageFromNDArray,
LoadMultiChannelImageFromFiles,
LoadProposals)
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures.mask import BitmapMasks, PolygonMasks
try:
import panopticapi
except ImportError:
panopticapi = None
class TestLoadAnnotations(unittest.TestCase):
    """Tests for LoadAnnotations over a toy three-instance sample."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        data_prefix = osp.join(osp.dirname(__file__), '../../data')
        seg_map = osp.join(data_prefix, 'gray.jpg')
        self.results = {
            'ori_shape': (300, 400),
            'seg_map_path':
            seg_map,
            'instances': [{
                'bbox': [0, 0, 10, 20],
                'bbox_label': 1,
                'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],
                'ignore_flag': 0
            }, {
                'bbox': [10, 10, 110, 120],
                'bbox_label': 2,
                'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],
                'ignore_flag': 0
            }, {
                'bbox': [50, 50, 60, 80],
                'bbox_label': 2,
                'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],
                'ignore_flag': 1
            }]
        }

    def _run(self, **kwargs):
        """Apply a LoadAnnotations built with ``kwargs`` to a results copy."""
        transform = LoadAnnotations(**kwargs)
        return transform(copy.deepcopy(self.results))

    def test_load_bboxes(self):
        results = self._run(
            with_bbox=True,
            with_label=False,
            with_seg=False,
            with_mask=False,
            box_type=None)
        self.assertIn('gt_bboxes', results)
        expected = np.array([[0, 0, 10, 20], [10, 10, 110, 120],
                             [50, 50, 60, 80]])
        self.assertTrue((results['gt_bboxes'] == expected).all())
        self.assertEqual(results['gt_bboxes'].dtype, np.float32)
        # The third instance carries ignore_flag=1.
        self.assertTrue(
            (results['gt_ignore_flags'] == np.array([0, 0, 1])).all())
        self.assertEqual(results['gt_ignore_flags'].dtype, bool)

    def test_load_labels(self):
        results = self._run(
            with_bbox=False,
            with_label=True,
            with_seg=False,
            with_mask=False)
        self.assertIn('gt_bboxes_labels', results)
        self.assertTrue(
            (results['gt_bboxes_labels'] == np.array([1, 2, 2])).all())
        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)

    def test_load_mask(self):
        # Without poly2mask the raw polygons are kept as PolygonMasks.
        results = self._run(
            with_bbox=False,
            with_label=False,
            with_seg=False,
            with_mask=True,
            poly2mask=False)
        self.assertIn('gt_masks', results)
        self.assertEqual(len(results['gt_masks']), 3)
        self.assertIsInstance(results['gt_masks'], PolygonMasks)

    def test_load_mask_poly2mask(self):
        # poly2mask=True rasterizes polygons into BitmapMasks.
        results = self._run(
            with_bbox=False,
            with_label=False,
            with_seg=False,
            with_mask=True,
            poly2mask=True)
        self.assertIn('gt_masks', results)
        self.assertEqual(len(results['gt_masks']), 3)
        self.assertIsInstance(results['gt_masks'], BitmapMasks)

    def test_repr(self):
        transform = LoadAnnotations(
            with_bbox=True,
            with_label=False,
            with_seg=False,
            with_mask=False,
        )
        self.assertEqual(
            repr(transform), ('LoadAnnotations(with_bbox=True, '
                              'with_label=False, with_mask=False, '
                              'with_seg=False, poly2mask=True, '
                              "imdecode_backend='cv2', "
                              'backend_args=None)'))
class TestFilterAnnotations(unittest.TestCase):
    """Tests for FilterAnnotations size filtering and keep_empty handling."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.results = {
            'img':
            np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels':
            np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes':
            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]]),
            # Fix: ``np.bool8`` was deprecated in NumPy 1.20 and removed in
            # NumPy 1.24; the builtin ``bool`` is the portable spelling of
            # the same dtype.
            'gt_ignore_flags':
            np.array([0, 0, 1], dtype=bool),
            'gt_masks':
            BitmapMasks(rng.rand(3, 224, 224), height=224, width=224),
        }

    def test_transform(self):
        # keep_empty=True: when every box is filtered out, None is returned
        # so the whole sample is dropped.
        transform = FilterAnnotations(
            min_gt_bbox_wh=(50, 50),
            keep_empty=True,
        )
        results = transform(copy.deepcopy(self.results))
        self.assertIsNone(results)
        # keep_empty=False: the (emptied) results dict is still returned.
        transform = FilterAnnotations(
            min_gt_bbox_wh=(50, 50),
            keep_empty=False,
        )
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(isinstance(results, dict))
        # Only boxes at least 15x15 survive; the first (10x10) is dropped
        # and all parallel annotation fields shrink consistently.
        transform = FilterAnnotations(min_gt_bbox_wh=(15, 15), )
        results = transform(copy.deepcopy(self.results))
        self.assertIsInstance(results, dict)
        self.assertTrue((results['gt_bboxes_labels'] == np.array([2,
                                                                  3])).all())
        self.assertTrue((results['gt_bboxes'] == np.array([[20, 20, 40, 40],
                                                           [40, 40, 80,
                                                            80]])).all())
        self.assertEqual(len(results['gt_masks']), 2)
        self.assertEqual(len(results['gt_ignore_flags']), 2)

    def test_repr(self):
        transform = FilterAnnotations(
            min_gt_bbox_wh=(1, 1),
            keep_empty=False,
        )
        self.assertEqual(
            repr(transform), ('FilterAnnotations(min_gt_bbox_wh=(1, 1), '
                              'keep_empty=False)'))
class TestLoadPanopticAnnotations(unittest.TestCase):
    """Tests for LoadPanopticAnnotations with a synthetic panoptic map."""

    def setUp(self):
        # Build a 10x10 panoptic id map with three segments: two "thing"
        # instances (categories 0 and 1) and one "stuff" region (category 2).
        # Segment ids follow the ``category + instance * INSTANCE_OFFSET``
        # encoding used by mmdet's panoptic pipeline.
        seg_map = np.zeros((10, 10), dtype=np.int32)
        seg_map[:5, :10] = 1 + 10 * INSTANCE_OFFSET
        seg_map[5:10, :5] = 4 + 11 * INSTANCE_OFFSET
        seg_map[5:10, 5:10] = 6 + 0 * INSTANCE_OFFSET
        # Encode the int32 ids into an RGB image (base-256 digits per
        # channel), the on-disk format panopticapi's rgb2id reverses.
        rgb_seg_map = np.zeros((10, 10, 3), dtype=np.uint8)
        rgb_seg_map[:, :, 0] = seg_map / (256 * 256)
        rgb_seg_map[:, :, 1] = seg_map % (256 * 256) / 256
        rgb_seg_map[:, :, 2] = seg_map % 256
        self.seg_map_path = './1.png'
        mmcv.imwrite(rgb_seg_map, self.seg_map_path)

        self.seg_map = seg_map
        self.rgb_seg_map = rgb_seg_map
        # Raw annotation dict: two thing instances (second one ignored)
        # plus per-segment info matching the ids written above.
        self.results = {
            'ori_shape': (10, 10),
            'instances': [{
                'bbox': [0, 0, 10, 5],
                'bbox_label': 0,
                'ignore_flag': 0,
            }, {
                'bbox': [0, 5, 5, 10],
                'bbox_label': 1,
                'ignore_flag': 1,
            }],
            'segments_info': [
                {
                    'id': 1 + 10 * INSTANCE_OFFSET,
                    'category': 0,
                    'is_thing': True,
                },
                {
                    'id': 4 + 11 * INSTANCE_OFFSET,
                    'category': 1,
                    'is_thing': True,
                },
                {
                    'id': 6 + 0 * INSTANCE_OFFSET,
                    'category': 2,
                    'is_thing': False,
                },
            ],
            'seg_map_path':
            self.seg_map_path
        }

        # Expected outputs: one binary mask per thing instance...
        self.gt_mask = BitmapMasks([
            (seg_map == 1 + 10 * INSTANCE_OFFSET).astype(np.uint8),
            (seg_map == 4 + 11 * INSTANCE_OFFSET).astype(np.uint8),
        ], 10, 10)
        self.gt_bboxes = np.array([[0, 0, 10, 5], [0, 5, 5, 10]],
                                  dtype=np.float32)
        self.gt_bboxes_labels = np.array([0, 1], dtype=np.int64)
        self.gt_ignore_flags = np.array([0, 1], dtype=bool)
        # ...and a semantic map holding plain category indices per pixel.
        self.gt_seg_map = np.zeros((10, 10), dtype=np.int32)
        self.gt_seg_map[:5, :10] = 0
        self.gt_seg_map[5:10, :5] = 1
        self.gt_seg_map[5:10, 5:10] = 2

    def tearDown(self):
        # Remove the temporary panoptic PNG written in setUp.
        os.remove(self.seg_map_path)

    @unittest.skipIf(panopticapi is not None, 'panopticapi is installed')
    def test_init_without_panopticapi(self):
        # test if panopticapi is not installed
        from mmdet.datasets.transforms import LoadPanopticAnnotations
        with self.assertRaisesRegex(
                ImportError,
                'panopticapi is not installed, please install it by'):
            LoadPanopticAnnotations()

    def test_transform(self):
        # Stub out panopticapi so the transform can be imported and its
        # rgb2id call can be patched to return our known id map.
        sys.modules['panopticapi'] = MagicMock()
        sys.modules['panopticapi.utils'] = MagicMock()
        from mmdet.datasets.transforms import LoadPanopticAnnotations
        mock_rgb2id = Mock(return_value=self.seg_map)
        with patch('panopticapi.utils.rgb2id', mock_rgb2id):
            # test with all False
            transform = LoadPanopticAnnotations(
                with_bbox=False,
                with_label=False,
                with_mask=False,
                with_seg=False)
            results = transform(copy.deepcopy(self.results))
            self.assertDictEqual(results, self.results)

            # test with with_mask=True
            transform = LoadPanopticAnnotations(
                with_bbox=False,
                with_label=False,
                with_mask=True,
                with_seg=False)
            results = transform(copy.deepcopy(self.results))
            self.assertTrue(
                (results['gt_masks'].masks == self.gt_mask.masks).all())

            # test with with_seg=True
            transform = LoadPanopticAnnotations(
                with_bbox=False,
                with_label=False,
                with_mask=False,
                with_seg=True)
            results = transform(copy.deepcopy(self.results))
            self.assertNotIn('gt_masks', results)
            self.assertTrue((results['gt_seg_map'] == self.gt_seg_map).all())

            # test with all True
            transform = LoadPanopticAnnotations(
                with_bbox=True,
                with_label=True,
                with_mask=True,
                with_seg=True,
                box_type=None)
            results = transform(copy.deepcopy(self.results))
            self.assertTrue(
                (results['gt_masks'].masks == self.gt_mask.masks).all())
            self.assertTrue((results['gt_bboxes'] == self.gt_bboxes).all())
            self.assertTrue(
                (results['gt_bboxes_labels'] == self.gt_bboxes_labels).all())
            self.assertTrue(
                (results['gt_ignore_flags'] == self.gt_ignore_flags).all())
            self.assertTrue((results['gt_seg_map'] == self.gt_seg_map).all())
class TestLoadImageFromNDArray(unittest.TestCase):
    """Tests for LoadImageFromNDArray on an in-memory image."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.results = {'img': np.zeros((256, 256, 3), dtype=np.uint8)}

    def test_transform(self):
        transform = LoadImageFromNDArray()
        results = transform(copy.deepcopy(self.results))
        self.assertEqual(results['img'].shape, (256, 256, 3))
        self.assertEqual(results['img'].dtype, np.uint8)
        # Both shape fields are derived from the array itself.
        for key in ('img_shape', 'ori_shape'):
            self.assertEqual(results[key], (256, 256))
        # to_float32=True converts the image dtype.
        transform = LoadImageFromNDArray(to_float32=True)
        results = transform(copy.deepcopy(results))
        self.assertEqual(results['img'].dtype, np.float32)

    def test_repr(self):
        transform = LoadImageFromNDArray()
        self.assertEqual(
            repr(transform), ('LoadImageFromNDArray('
                              'ignore_empty=False, '
                              'to_float32=False, '
                              "color_type='color', "
                              "imdecode_backend='cv2', "
                              'backend_args=None)'))
class TestLoadMultiChannelImageFromFiles(unittest.TestCase):
    """Tests for LoadMultiChannelImageFromFiles stacking per-channel files."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        # Write four single-channel images that will be stacked into one
        # 4-channel image by the transform.
        self.img_path = []
        for i in range(4):
            img_channel_path = f'./part_{i}.jpg'
            img_channel = np.zeros((10, 10), dtype=np.uint8)
            mmcv.imwrite(img_channel, img_channel_path)
            self.img_path.append(img_channel_path)
        self.results = {'img_path': self.img_path}

    def tearDown(self):
        # Remove the temporary channel files created in setUp.
        for filename in self.img_path:
            os.remove(filename)

    def test_transform(self):
        transform = LoadMultiChannelImageFromFiles()
        results = transform(copy.deepcopy(self.results))
        self.assertEqual(results['img'].shape, (10, 10, 4))
        self.assertEqual(results['img'].dtype, np.uint8)
        self.assertEqual(results['img_shape'], (10, 10))
        self.assertEqual(results['ori_shape'], (10, 10))
        # to_float32
        transform = LoadMultiChannelImageFromFiles(to_float32=True)
        results = transform(copy.deepcopy(results))
        self.assertEqual(results['img'].dtype, np.float32)

    def test_repr(self):
        # Fix: renamed from the misspelled ``test_rper`` for consistency
        # with every other test class in this file.
        transform = LoadMultiChannelImageFromFiles()
        self.assertEqual(
            repr(transform), ('LoadMultiChannelImageFromFiles('
                              'to_float32=False, '
                              "color_type='unchanged', "
                              "imdecode_backend='cv2', "
                              'backend_args=None)'))
class TestLoadProposals(unittest.TestCase):
    """Tests for LoadProposals validation, dtype casting and truncation."""

    def test_transform(self):
        transform = LoadProposals()
        results = {
            'proposals':
            dict(
                bboxes=np.zeros((5, 4), dtype=np.int64),
                scores=np.zeros((5, ), dtype=np.int64))
        }
        results = transform(results)
        # Proposals and scores are cast to float32 regardless of input dtype.
        self.assertEqual(results['proposals'].dtype, np.float32)
        self.assertEqual(results['proposals'].shape[-1], 4)
        self.assertEqual(results['proposals_scores'].dtype, np.float32)

        # bboxes.shape[1] should be 4
        results = {'proposals': dict(bboxes=np.zeros((5, 5), dtype=np.int64))}
        with self.assertRaises(AssertionError):
            transform(results)

        # bboxes.shape[0] should equal to scores.shape[0]
        results = {
            'proposals':
            dict(
                bboxes=np.zeros((5, 4), dtype=np.int64),
                scores=np.zeros((3, ), dtype=np.int64))
        }
        with self.assertRaises(AssertionError):
            transform(results)

        # empty bboxes
        results = {
            'proposals': dict(bboxes=np.zeros((0, 4), dtype=np.float32))
        }
        results = transform(results)
        excepted_proposals = np.zeros((0, 4), dtype=np.float32)
        excepted_proposals_scores = np.zeros(0, dtype=np.float32)
        self.assertTrue((results['proposals'] == excepted_proposals).all())
        self.assertTrue(
            (results['proposals_scores'] == excepted_proposals_scores).all())

        # num_max_proposals truncates the proposal list.
        transform = LoadProposals(num_max_proposals=2)
        results = {
            'proposals':
            dict(
                bboxes=np.zeros((5, 4), dtype=np.int64),
                scores=np.zeros((5, ), dtype=np.int64))
        }
        results = transform(results)
        self.assertEqual(results['proposals'].shape[0], 2)

    def test_repr(self):
        transform = LoadProposals()
        self.assertEqual(
            repr(transform), 'LoadProposals(num_max_proposals=None)')
class TestLoadEmptyAnnotations(unittest.TestCase):
    """Tests for LoadEmptyAnnotations producing empty gt fields."""

    def test_transform(self):
        transform = LoadEmptyAnnotations(
            with_bbox=True, with_label=True, with_mask=True, with_seg=True)
        results = transform({'img_shape': (224, 224)})
        # Empty boxes/labels/flags still carry the conventional dtypes.
        self.assertEqual(results['gt_bboxes'].dtype, np.float32)
        self.assertEqual(results['gt_bboxes'].shape[-1], 4)
        self.assertEqual(results['gt_ignore_flags'].dtype, bool)
        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)
        # Masks and the semantic map match the image resolution.
        self.assertEqual(results['gt_masks'].masks.dtype, np.uint8)
        self.assertEqual(results['gt_masks'].masks.shape[-2:],
                         results['img_shape'])
        self.assertEqual(results['gt_seg_map'].dtype, np.uint8)
        self.assertEqual(results['gt_seg_map'].shape, results['img_shape'])

    def test_repr(self):
        transform = LoadEmptyAnnotations()
        self.assertEqual(
            repr(transform), 'LoadEmptyAnnotations(with_bbox=True, '
            'with_label=True, '
            'with_mask=False, '
            'with_seg=False, '
            'seg_ignore_label=255)')
| 17,973 | 36.84 | 78 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_transforms/test_colorspace.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import unittest
from mmdet.datasets.transforms import (AutoContrast, Brightness, Color,
ColorTransform, Contrast, Equalize,
Invert, Posterize, Sharpness, Solarize,
SolarizeAdd)
from .utils import check_result_same, construct_toy_data
class TestColorTransform(unittest.TestCase):
    """Tests for the generic ColorTransform augmentation."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_colortransform(self):
        # An invalid (negative) level must be rejected at construction time.
        with self.assertRaises(AssertionError):
            ColorTransform(level=-1)
        # An out-of-range probability must be rejected as well.
        with self.assertRaises(AssertionError):
            ColorTransform(level=1, prob=-0.5)
        # With prob=0 the transform never fires, so nothing may change.
        aug = ColorTransform(prob=0.0, level=10)
        unchanged = aug(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, unchanged, self.check_keys)

    def test_repr(self):
        transform = ColorTransform(level=10, prob=1.)
        self.assertEqual(
            repr(transform), ('ColorTransform(prob=1.0, '
                              'level=10, '
                              'min_mag=0.1, '
                              'max_mag=1.9)'))
class TestColor(unittest.TestCase):
    """Tests for the Color augmentation."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_color(self):
        # level=5 maps to the identity magnitude, so nothing may change.
        aug = Color(prob=1.0, level=5)
        unchanged = aug(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, unchanged, self.check_keys)
        # Extreme levels must at least run without error.
        Color(prob=1.0, level=0)(copy.deepcopy(self.results_mask))
        Color(prob=1.0, level=10)(copy.deepcopy(self.results_mask))

    def test_repr(self):
        transform = Color(level=10, prob=1.)
        self.assertEqual(
            repr(transform), ('Color(prob=1.0, '
                              'level=10, '
                              'min_mag=0.1, '
                              'max_mag=1.9)'))
class TestBrightness(unittest.TestCase):
    """Tests for the Brightness augmentation."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_brightness(self):
        # level=5 maps to the identity magnitude, so nothing may change.
        aug = Brightness(level=5, prob=1.0)
        unchanged = aug(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, unchanged, self.check_keys)
        # Extreme levels must at least run without error.
        Brightness(prob=1.0, level=0)(copy.deepcopy(self.results_mask))
        Brightness(prob=1.0, level=10)(copy.deepcopy(self.results_mask))

    def test_repr(self):
        transform = Brightness(prob=1.0, level=10)
        self.assertEqual(
            repr(transform), ('Brightness(prob=1.0, '
                              'level=10, '
                              'min_mag=0.1, '
                              'max_mag=1.9)'))
class TestContrast(unittest.TestCase):
    """Tests for the Contrast transform."""

    def setUp(self):
        """Build a toy sample with bitmap masks and the keys to compare.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_contrast(self):
        # level=5 maps to the identity magnitude, so nothing may change.
        identity = Contrast(prob=1.0, level=5)
        unchanged = identity(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, unchanged, self.check_keys)
        # The extreme levels only need to run without error.
        for level in (0, 10):
            Contrast(prob=1.0, level=level)(copy.deepcopy(self.results_mask))

    def test_repr(self):
        expected = ('Contrast(prob=1.0, '
                    'level=10, '
                    'min_mag=0.1, '
                    'max_mag=1.9)')
        self.assertEqual(repr(Contrast(level=10, prob=1.)), expected)
class TestSharpness(unittest.TestCase):
    """Tests for the Sharpness transform."""

    def setUp(self):
        """Build a toy sample with bitmap masks and the keys to compare.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_sharpness(self):
        # level=5 maps to the identity magnitude, so nothing may change.
        identity = Sharpness(prob=1.0, level=5)
        unchanged = identity(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, unchanged, self.check_keys)
        # The extreme levels only need to run without error.
        for level in (0, 10):
            Sharpness(prob=1.0, level=level)(copy.deepcopy(self.results_mask))

    def test_repr(self):
        expected = ('Sharpness(prob=1.0, '
                    'level=10, '
                    'min_mag=0.1, '
                    'max_mag=1.9)')
        self.assertEqual(repr(Sharpness(level=10, prob=1.)), expected)
class TestSolarize(unittest.TestCase):
    """Tests for the Solarize transform."""

    def setUp(self):
        """Build a toy sample with bitmap masks and the keys to compare.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_solarize(self):
        # level=10 maps to the no-op magnitude, so nothing may change.
        identity = Solarize(prob=1.0, level=10)
        unchanged = identity(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, unchanged, self.check_keys)
        # level=0 (strongest effect) only needs to run without error.
        Solarize(prob=1.0, level=0)(copy.deepcopy(self.results_mask))

    def test_repr(self):
        expected = ('Solarize(prob=1.0, '
                    'level=10, '
                    'min_mag=0.0, '
                    'max_mag=256.0)')
        self.assertEqual(repr(Solarize(level=10, prob=1.)), expected)
class TestSolarizeAdd(unittest.TestCase):
    """Tests for the SolarizeAdd transform."""

    def setUp(self):
        """Build a toy sample with bitmap masks and the keys to compare.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_solarize_add(self):
        # Renamed from ``test_solarize`` (copy-paste from TestSolarize) so
        # the test name matches the transform under test; unittest still
        # discovers it via the ``test_`` prefix.
        # level=0 maps to magnitude 0, so nothing may change.
        transform = SolarizeAdd(prob=1.0, level=0)
        results_wo_solarizeadd = transform(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, results_wo_solarizeadd,
                          self.check_keys)
        # level=10 (maximum magnitude) only needs to run without error.
        transform = SolarizeAdd(prob=1.0, level=10)
        transform(copy.deepcopy(self.results_mask))

    def test_repr(self):
        transform = SolarizeAdd(level=10, prob=1.)
        self.assertEqual(
            repr(transform), ('SolarizeAdd(prob=1.0, '
                              'level=10, '
                              'min_mag=0.0, '
                              'max_mag=110.0)'))
class TestPosterize(unittest.TestCase):
    """Tests for the Posterize transform."""

    def setUp(self):
        """Build a toy sample with bitmap masks and the keys to compare.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_posterize(self):
        # level=10 with max_mag=8.0 maps to the no-op magnitude, so nothing
        # may change.
        identity = Posterize(prob=1.0, level=10, max_mag=8.0)
        unchanged = identity(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, unchanged, self.check_keys)
        # level=0 only needs to run without error.
        Posterize(prob=1.0, level=0)(copy.deepcopy(self.results_mask))

    def test_repr(self):
        expected = ('Posterize(prob=1.0, '
                    'level=10, '
                    'min_mag=0.0, '
                    'max_mag=4.0)')
        self.assertEqual(repr(Posterize(level=10, prob=1.)), expected)
class TestEqualize(unittest.TestCase):
    """Tests for the Equalize transform."""

    def setUp(self):
        """Build a toy sample with bitmap masks and the keys to compare.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_equalize(self):
        # prob=0 means the transform is never applied, so nothing may change.
        noop = Equalize(prob=0.0)
        untouched = noop(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, untouched, self.check_keys)
        # prob=1 applies the transform; it only needs to run without error.
        Equalize(prob=1.0)(copy.deepcopy(self.results_mask))

    def test_repr(self):
        expected = ('Equalize(prob=1.0, '
                    'level=None, '
                    'min_mag=0.1, '
                    'max_mag=1.9)')
        self.assertEqual(repr(Equalize(prob=1.0)), expected)
class TestAutoContrast(unittest.TestCase):
    """Tests for the AutoContrast transform."""

    def setUp(self):
        """Build a toy sample with bitmap masks and the keys to compare.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_autocontrast(self):
        # prob=0 means the transform is never applied, so nothing may change.
        noop = AutoContrast(prob=0.0)
        untouched = noop(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, untouched, self.check_keys)
        # prob=1 applies the transform; it only needs to run without error.
        AutoContrast(prob=1.0)(copy.deepcopy(self.results_mask))

    def test_repr(self):
        expected = ('AutoContrast(prob=1.0, '
                    'level=None, '
                    'min_mag=0.1, '
                    'max_mag=1.9)')
        self.assertEqual(repr(AutoContrast(prob=1.0)), expected)
class TestInvert(unittest.TestCase):
    """Tests for the Invert transform."""

    def setUp(self):
        """Build a toy sample with bitmap masks and the keys to compare.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_invert(self):
        # prob=0 means the transform is never applied, so nothing may change.
        noop = Invert(prob=0.0)
        untouched = noop(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, untouched, self.check_keys)
        # prob=1 applies the transform; it only needs to run without error.
        Invert(prob=1.0)(copy.deepcopy(self.results_mask))

    def test_repr(self):
        expected = ('Invert(prob=1.0, '
                    'level=None, '
                    'min_mag=0.1, '
                    'max_mag=1.9)')
        self.assertEqual(repr(Invert(prob=1.0)), expected)
| 14,352 | 38.215847 | 79 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_transforms/test_augment_wrappers.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import unittest
from mmdet.datasets.transforms import (AutoAugment, AutoContrast, Brightness,
Color, Contrast, Equalize, Invert,
Posterize, RandAugment, Rotate,
Sharpness, ShearX, ShearY, Solarize,
SolarizeAdd, TranslateX, TranslateY)
from mmdet.utils import register_all_modules
from .utils import check_result_same, construct_toy_data
# Populate the mmdet registries up front so the transforms referenced by
# the config dicts in the policies below can be built.
register_all_modules()
class TestAutoAugment(unittest.TestCase):
    """Tests for AutoAugment.

    Each sub-case checks that ``AutoAugment`` equipped with a single policy
    produces exactly the same results as applying the equivalent transforms
    manually, one after another, on an identical input.
    """

    def setUp(self):
        """Build a toy sample with bitmap masks and the keys to compare.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map',
                           'homography_matrix')
        self.results_mask = construct_toy_data(poly2mask=True)
        self.img_fill_val = (104, 116, 124)
        self.seg_ignore_label = 255

    def _assert_equals_manual(self, policies, manual_transforms):
        """Check ``AutoAugment(policies)`` against manually chained transforms.

        Args:
            policies (list[list[dict]]): Policies fed to ``AutoAugment``.
            manual_transforms (list): Instantiated transforms equivalent to
                ``policies``, applied sequentially to a copy of the sample.
        """
        results_auto = AutoAugment(policies=policies)(
            copy.deepcopy(self.results_mask))
        results_manual = copy.deepcopy(self.results_mask)
        for transform in manual_transforms:
            results_manual = transform(results_manual)
        check_result_same(results_manual, results_auto, self.check_keys)

    def test_autoaugment(self):
        # test AutoAugment equipped with Shear
        self._assert_equals_manual(
            [[
                dict(type='ShearX', prob=1.0, level=3, reversal_prob=0.0),
                dict(type='ShearY', prob=1.0, level=7, reversal_prob=1.0)
            ]], [
                ShearX(prob=1.0, level=3, reversal_prob=0.0),
                ShearY(prob=1.0, level=7, reversal_prob=1.0)
            ])
        # test AutoAugment equipped with Rotate
        self._assert_equals_manual(
            [[dict(type='Rotate', prob=1.0, level=10, reversal_prob=0.0)]],
            [Rotate(prob=1.0, level=10, reversal_prob=0.0)])
        # test AutoAugment equipped with Translate
        self._assert_equals_manual(
            [[
                dict(
                    type='TranslateX',
                    prob=1.0,
                    level=10,
                    max_mag=1.0,
                    reversal_prob=0.0),
                dict(
                    type='TranslateY',
                    prob=1.0,
                    level=10,
                    max_mag=1.0,
                    reversal_prob=1.0)
            ]], [
                TranslateX(prob=1.0, level=10, max_mag=1.0, reversal_prob=0.0),
                TranslateY(prob=1.0, level=10, max_mag=1.0, reversal_prob=1.0)
            ])
        # test AutoAugment equipped with each level-based color transform
        # (same order as the historical one-by-one checks).
        for transform_type, transform_cls in [('Brightness', Brightness),
                                              ('Color', Color),
                                              ('Contrast', Contrast),
                                              ('Sharpness', Sharpness),
                                              ('Solarize', Solarize),
                                              ('SolarizeAdd', SolarizeAdd),
                                              ('Posterize', Posterize)]:
            self._assert_equals_manual(
                [[dict(type=transform_type, prob=1.0, level=3)]],
                [transform_cls(prob=1.0, level=3)])
        # test AutoAugment equipped with the level-free transforms
        for transform_type, transform_cls in [('Equalize', Equalize),
                                              ('AutoContrast', AutoContrast),
                                              ('Invert', Invert)]:
            self._assert_equals_manual(
                [[dict(type=transform_type, prob=1.0)]],
                [transform_cls(prob=1.0)])
        # test AutoAugment equipped with default policies: only needs to run
        # without error.
        AutoAugment()(copy.deepcopy(self.results_mask))

    def test_repr(self):
        policies = [[
            dict(type='Rotate', prob=1.0, level=10, reversal_prob=0.0),
            dict(type='Invert', prob=1.0),
        ]]
        transform = AutoAugment(policies=policies)
        self.assertEqual(
            repr(transform), ('AutoAugment('
                              'policies=[['
                              "{'type': 'Rotate', 'prob': 1.0, "
                              "'level': 10, 'reversal_prob': 0.0}, "
                              "{'type': 'Invert', 'prob': 1.0}]], "
                              'prob=None)'))
class TestRandAugment(unittest.TestCase):
    """Tests for RandAugment."""

    def setUp(self):
        """Build a toy sample with bitmap masks and the keys to compare.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map',
                           'homography_matrix')
        self.results_mask = construct_toy_data(poly2mask=True)
        self.img_fill_val = (104, 116, 124)
        self.seg_ignore_label = 255

    def test_randaugment(self):
        # With a single-transform augmentation space and aug_num=1,
        # RandAugment must behave exactly like that transform.
        space = [[dict(type='Rotate', prob=1.0, level=10, reversal_prob=0.0)]]
        rand_results = RandAugment(
            aug_space=space, aug_num=1)(copy.deepcopy(self.results_mask))
        rotate_results = Rotate(
            prob=1.0, level=10,
            reversal_prob=0.0)(copy.deepcopy(self.results_mask))
        check_result_same(rotate_results, rand_results, self.check_keys)
        # The default augmentation space only needs to run without error.
        RandAugment()(copy.deepcopy(self.results_mask))

    def test_repr(self):
        expected = ('RandAugment('
                    'aug_space=['
                    "[{'type': 'Rotate'}], "
                    "[{'type': 'Invert'}]], "
                    'aug_num=2, '
                    'prob=None)')
        transform = RandAugment(
            aug_space=[[dict(type='Rotate')], [dict(type='Invert')]])
        self.assertEqual(repr(transform), expected)
py
|
ERD
|
ERD-main/tests/test_datasets/test_transforms/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import construct_toy_data, create_full_masks, create_random_bboxes
# Explicit public API of the shared test-utils helpers re-exported here.
__all__ = ['create_random_bboxes', 'create_full_masks', 'construct_toy_data']
| 206 | 40.4 | 78 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_transforms/test_transforms.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import mmcv
import numpy as np
import torch
from mmcv.transforms import LoadImageFromFile
# yapf:disable
from mmdet.datasets.transforms import (CopyPaste, CutOut, Expand,
FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad,
RandomCrop, RandomErasing, RandomFlip,
RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
# yapf:enable
from mmdet.evaluation import bbox_overlaps
from mmdet.registry import TRANSFORMS
from mmdet.structures.bbox import HorizontalBoxes, bbox_project
from mmdet.structures.mask import BitmapMasks
from .utils import construct_toy_data, create_full_masks, create_random_bboxes
# albumentations is an optional dependency; fall back to ``None``
# placeholders so its absence can be detected at test time.
try:
    import albumentations
    from albumentations import Compose
except ImportError:
    albumentations = None
    Compose = None
# yapf:enable
class TestResize(unittest.TestCase):
    # Tests for Resize: scaling of img / gt_bboxes / gt_masks / gt_seg_map,
    # border clipping, and the recorded homography matrix, for both plain
    # ndarray boxes and the HorizontalBoxes box type.
    def setUp(self):
        """Set up the data samples used by every test method.
        TestCase calls functions in this order: setUp() -> testMethod()
        -> tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        # Full sample: image, seg map, one bbox and one bitmap mask.
        self.data_info1 = dict(
            img=np.random.random((1333, 800, 3)),
            gt_seg_map=np.random.random((1333, 800, 3)),
            gt_bboxes=np.array([[0, 0, 112, 112]], dtype=np.float32),
            gt_masks=BitmapMasks(
                rng.rand(1, 1333, 800), height=1333, width=800))
        # Sample whose bbox reaches beyond the image border (used for the
        # clip_object_border checks).
        # NOTE(review): the trailing ``dtype=np.float32`` creates a stray
        # ``'dtype'`` key in the dict (looks like a copy-paste slip); the
        # transform just carries it through.
        self.data_info2 = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[200, 150, 600, 450]], dtype=np.float32),
            dtype=np.float32)
        # Image-only sample.
        self.data_info3 = dict(img=np.random.random((300, 400, 3)))
    def test_resize(self):
        # test keep_ratio is True: the 1333x800 image is scaled so it fits
        # inside 2000x2000, giving a 2000x1200 result.
        transform = Resize(scale=(2000, 2000), keep_ratio=True)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (2000, 1200))
        self.assertEqual(results['scale_factor'], (1200 / 800, 2000 / 1333))
        # test resize_bboxes/seg/masks with an explicit (w, h) scale factor
        transform = Resize(scale_factor=(1.5, 2))
        results = transform(copy.deepcopy(self.data_info1))
        self.assertTrue((results['gt_bboxes'] == np.array([[0, 0, 168,
                                                            224]])).all())
        self.assertEqual(results['gt_masks'].height, 2666)
        self.assertEqual(results['gt_masks'].width, 1200)
        self.assertEqual(results['gt_seg_map'].shape[:2], (2666, 1200))
        # test clip_object_border = False: the out-of-border box is scaled
        # but not clipped to the new image size.
        transform = Resize(scale=(200, 150), clip_object_border=False)
        results = transform(self.data_info2)
        self.assertTrue((results['gt_bboxes'] == np.array([100, 75, 300,
                                                           225])).all())
        # test only with image
        transform = Resize(scale=(200, 150), clip_object_border=False)
        results = transform(self.data_info3)
        self.assertTupleEqual(results['img'].shape[:2], (150, 200))
        # test geometric transformation with homography matrix: projecting
        # the original boxes with the recorded matrix must reproduce the
        # resized boxes.
        transform = Resize(scale_factor=(1.5, 2))
        results = transform(copy.deepcopy(self.data_info1))
        self.assertTrue((bbox_project(
            copy.deepcopy(self.data_info1['gt_bboxes']),
            results['homography_matrix']) == results['gt_bboxes']).all())
    def test_resize_use_box_type(self):
        # Same checks as above, but with gt_bboxes wrapped in the
        # HorizontalBoxes box type.
        data_info1 = copy.deepcopy(self.data_info1)
        data_info1['gt_bboxes'] = HorizontalBoxes(data_info1['gt_bboxes'])
        data_info2 = copy.deepcopy(self.data_info2)
        data_info2['gt_bboxes'] = HorizontalBoxes(data_info2['gt_bboxes'])
        # test keep_ratio is True
        transform = Resize(scale=(2000, 2000), keep_ratio=True)
        results = transform(copy.deepcopy(data_info1))
        self.assertEqual(results['img_shape'], (2000, 1200))
        self.assertEqual(results['scale_factor'], (1200 / 800, 2000 / 1333))
        # test resize_bboxes/seg/masks
        transform = Resize(scale_factor=(1.5, 2))
        results = transform(copy.deepcopy(data_info1))
        self.assertTrue(
            (results['gt_bboxes'].numpy() == np.array([[0, 0, 168,
                                                        224]])).all())
        self.assertEqual(results['gt_masks'].height, 2666)
        self.assertEqual(results['gt_masks'].width, 1200)
        self.assertEqual(results['gt_seg_map'].shape[:2], (2666, 1200))
        # test clip_object_border = False
        transform = Resize(scale=(200, 150), clip_object_border=False)
        results = transform(data_info2)
        self.assertTrue(
            (results['gt_bboxes'].numpy() == np.array([100, 75, 300,
                                                       225])).all())
        # test geometric transformation with homography matrix
        transform = Resize(scale_factor=(1.5, 2))
        results = transform(copy.deepcopy(data_info1))
        self.assertTrue((bbox_project(
            copy.deepcopy(data_info1['gt_bboxes'].numpy()),
            results['homography_matrix']) == results['gt_bboxes'].numpy()
                         ).all())
    def test_repr(self):
        transform = Resize(scale=(2000, 2000), keep_ratio=True)
        # NOTE(review): the expected string (stray ')' characters after
        # ``clip_object_border=True`` and ``backend=cv2``) mirrors the
        # current Resize.__repr__ output verbatim.
        self.assertEqual(
            repr(transform), ('Resize(scale=(2000, 2000), '
                              'scale_factor=None, keep_ratio=True, '
                              'clip_object_border=True), backend=cv2), '
                              'interpolation=bilinear)'))
class TestFIXShapeResize(unittest.TestCase):
    # Tests for FixShapeResize: every sample is resized to a fixed
    # (width, height); with keep_ratio=True the scale factor preserves the
    # input aspect ratio.
    def setUp(self):
        """Set up the data samples used by every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        # Full sample: image, seg map, a full-height bbox and a bitmap mask.
        self.data_info1 = dict(
            img=np.random.random((1333, 800, 3)),
            gt_seg_map=np.random.random((1333, 800, 3)),
            gt_bboxes=np.array([[0, 0, 112, 1333]], dtype=np.float32),
            gt_masks=BitmapMasks(
                rng.rand(1, 1333, 800), height=1333, width=800))
        # NOTE(review): the trailing ``dtype=np.float32`` in the two dicts
        # below adds a stray ``'dtype'`` key (copy-paste slip); the
        # transform carries it through unchanged.
        self.data_info2 = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[200, 150, 600, 450]], dtype=np.float32),
            dtype=np.float32)
        # Image-only sample.
        self.data_info3 = dict(img=np.random.random((300, 400, 3)))
        # Sample used for the homography-matrix check.
        self.data_info4 = dict(
            img=np.random.random((600, 800, 3)),
            gt_bboxes=np.array([[200, 150, 300, 400]], dtype=np.float32),
            dtype=np.float32)
    def test_resize(self):
        # test keep_ratio is True
        transform = FixShapeResize(width=2000, height=800, keep_ratio=True)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (800, 2000))
        self.assertEqual(results['scale_factor'], (800 / 1333, 800 / 1333))
        # test resize_bboxes/seg/masks (aspect ratio not preserved)
        transform = FixShapeResize(width=2000, height=800, keep_ratio=False)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertTrue((results['gt_bboxes'] == np.array([[0, 0, 280,
                                                            800]])).all())
        self.assertEqual(results['gt_masks'].height, 800)
        self.assertEqual(results['gt_masks'].width, 2000)
        self.assertEqual(results['gt_seg_map'].shape[:2], (800, 2000))
        # test clip_object_border = False: the box is scaled but not clipped
        transform = FixShapeResize(
            width=200, height=150, clip_object_border=False)
        results = transform(copy.deepcopy(self.data_info2))
        self.assertTrue((results['gt_bboxes'] == np.array([100, 75, 300,
                                                           225])).all())
        # test only with image
        transform = FixShapeResize(
            width=200, height=150, clip_object_border=False)
        results = transform(self.data_info3)
        self.assertTupleEqual(results['img'].shape[:2], (150, 200))
        # test geometric transformation with homography matrix: projecting
        # the original boxes with the recorded matrix must reproduce the
        # resized boxes.
        transform = FixShapeResize(width=400, height=300)
        results = transform(copy.deepcopy(self.data_info4))
        self.assertTrue((bbox_project(
            copy.deepcopy(self.data_info4['gt_bboxes']),
            results['homography_matrix']) == results['gt_bboxes']).all())
    def test_resize_with_boxlist(self):
        # Same checks as above with gt_bboxes wrapped in HorizontalBoxes.
        data_info1 = copy.deepcopy(self.data_info1)
        data_info1['gt_bboxes'] = HorizontalBoxes(data_info1['gt_bboxes'])
        data_info2 = copy.deepcopy(self.data_info2)
        data_info2['gt_bboxes'] = HorizontalBoxes(data_info2['gt_bboxes'])
        data_info4 = copy.deepcopy(self.data_info4)
        data_info4['gt_bboxes'] = HorizontalBoxes(data_info4['gt_bboxes'])
        # test keep_ratio is True
        transform = FixShapeResize(width=2000, height=800, keep_ratio=True)
        results = transform(copy.deepcopy(data_info1))
        self.assertEqual(results['img_shape'], (800, 2000))
        self.assertEqual(results['scale_factor'], (800 / 1333, 800 / 1333))
        # test resize_bboxes/seg/masks
        transform = FixShapeResize(width=2000, height=800, keep_ratio=False)
        results = transform(copy.deepcopy(data_info1))
        self.assertTrue(
            (results['gt_bboxes'].numpy() == np.array([[0, 0, 280,
                                                        800]])).all())
        self.assertEqual(results['gt_masks'].height, 800)
        self.assertEqual(results['gt_masks'].width, 2000)
        self.assertEqual(results['gt_seg_map'].shape[:2], (800, 2000))
        # test clip_object_border = False
        transform = FixShapeResize(
            width=200, height=150, clip_object_border=False)
        results = transform(copy.deepcopy(data_info2))
        self.assertTrue(
            (results['gt_bboxes'].numpy() == np.array([100, 75, 300,
                                                       225])).all())
        # test only with image
        transform = FixShapeResize(
            width=200, height=150, clip_object_border=False)
        results = transform(self.data_info3)
        self.assertTupleEqual(results['img'].shape[:2], (150, 200))
        # test geometric transformation with homography matrix
        # NOTE(review): this projects ``self.data_info4`` (the plain ndarray
        # fixture) rather than the boxlist copy ``data_info4``; both hold
        # the same coordinates, so the check is equivalent.
        transform = FixShapeResize(width=400, height=300)
        results = transform(copy.deepcopy(data_info4))
        self.assertTrue((bbox_project(
            copy.deepcopy(self.data_info4['gt_bboxes']),
            results['homography_matrix']) == results['gt_bboxes'].numpy()
                         ).all())
    def test_repr(self):
        transform = FixShapeResize(width=2000, height=2000, keep_ratio=True)
        # NOTE(review): the stray ')' characters in the expected string
        # mirror the current FixShapeResize.__repr__ output verbatim.
        self.assertEqual(
            repr(transform), ('FixShapeResize(width=2000, height=2000, '
                              'keep_ratio=True, '
                              'clip_object_border=True), backend=cv2), '
                              'interpolation=bilinear)'))
class TestRandomFlip(unittest.TestCase):
    # Tests for RandomFlip on image, bboxes, masks and seg map, plus the
    # recorded homography matrix for each flip direction.
    def setUp(self):
        """Set up the data samples used by every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        # Full 224x224 sample with one bbox, one bitmap mask and a seg map.
        self.results1 = {
            'img': np.random.random((224, 224, 3)),
            'gt_bboxes': np.array([[0, 1, 100, 101]], dtype=np.float32),
            'gt_masks':
            BitmapMasks(rng.rand(1, 224, 224), height=224, width=224),
            'gt_seg_map': np.random.random((224, 224))
        }
        # Image-only sample (shares the same image array).
        self.results2 = {'img': self.results1['img']}
    def test_transform(self):
        # test with image, gt_bboxes, gt_masks, gt_seg_map
        # A horizontal flip maps x -> W - x, so [0, 100] becomes [124, 224].
        transform = RandomFlip(1.0)
        results_update = transform.transform(copy.deepcopy(self.results1))
        self.assertTrue(
            (results_update['gt_bboxes'] == np.array([[124, 1, 224,
                                                       101]])).all())
        # test only with image
        transform = RandomFlip(1.0)
        results_update = transform.transform(copy.deepcopy(self.results2))
        self.assertTrue(
            (results_update['img'] == self.results2['img'][:, ::-1]).all())
        # test geometric transformation with homography matrix: projecting
        # the original boxes with the recorded matrix must reproduce the
        # flipped boxes, for every flip direction.
        # (1) Horizontal Flip
        transform = RandomFlip(1.0)
        results_update = transform.transform(copy.deepcopy(self.results1))
        bboxes = copy.deepcopy(self.results1['gt_bboxes'])
        self.assertTrue((bbox_project(
            bboxes,
            results_update['homography_matrix']) == results_update['gt_bboxes']
                         ).all())
        # (2) Vertical Flip
        transform = RandomFlip(1.0, direction='vertical')
        results_update = transform.transform(copy.deepcopy(self.results1))
        bboxes = copy.deepcopy(self.results1['gt_bboxes'])
        self.assertTrue((bbox_project(
            bboxes,
            results_update['homography_matrix']) == results_update['gt_bboxes']
                         ).all())
        # (3) Diagonal Flip
        transform = RandomFlip(1.0, direction='diagonal')
        results_update = transform.transform(copy.deepcopy(self.results1))
        bboxes = copy.deepcopy(self.results1['gt_bboxes'])
        self.assertTrue((bbox_project(
            bboxes,
            results_update['homography_matrix']) == results_update['gt_bboxes']
                         ).all())
    def test_transform_use_box_type(self):
        # Same checks with gt_bboxes wrapped in the HorizontalBoxes type.
        results1 = copy.deepcopy(self.results1)
        results1['gt_bboxes'] = HorizontalBoxes(results1['gt_bboxes'])
        # test with image, gt_bboxes, gt_masks, gt_seg_map
        transform = RandomFlip(1.0)
        results_update = transform.transform(copy.deepcopy(results1))
        self.assertTrue((results_update['gt_bboxes'].numpy() == np.array(
            [[124, 1, 224, 101]])).all())
        # test geometric transformation with homography matrix
        # (1) Horizontal Flip
        transform = RandomFlip(1.0)
        results_update = transform.transform(copy.deepcopy(results1))
        bboxes = copy.deepcopy(results1['gt_bboxes'].numpy())
        self.assertTrue((bbox_project(bboxes,
                                      results_update['homography_matrix']) ==
                         results_update['gt_bboxes'].numpy()).all())
        # (2) Vertical Flip
        transform = RandomFlip(1.0, direction='vertical')
        results_update = transform.transform(copy.deepcopy(results1))
        bboxes = copy.deepcopy(results1['gt_bboxes'].numpy())
        self.assertTrue((bbox_project(bboxes,
                                      results_update['homography_matrix']) ==
                         results_update['gt_bboxes'].numpy()).all())
        # (3) Diagonal Flip
        transform = RandomFlip(1.0, direction='diagonal')
        results_update = transform.transform(copy.deepcopy(results1))
        bboxes = copy.deepcopy(results1['gt_bboxes'].numpy())
        self.assertTrue((bbox_project(bboxes,
                                      results_update['homography_matrix']) ==
                         results_update['gt_bboxes'].numpy()).all())
    def test_repr(self):
        # Only checks that the repr is a string (no exact-format assertion).
        transform = RandomFlip(0.1)
        transform_str = str(transform)
        self.assertIsInstance(transform_str, str)
class TestPad(unittest.TestCase):
    """Tests for the Pad transform on images and bitmap masks."""

    def setUp(self):
        """Build a 1333x800 sample with four bitmap masks.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.results = {
            'img': np.random.random((1333, 800, 3)),
            'gt_masks':
            BitmapMasks(rng.rand(4, 1333, 800), height=1333, width=800)
        }

    def _check_padded_shape(self, transform, expected_hw):
        """Apply ``transform`` to a copy of the sample and check that both
        the image and the masks were padded to ``expected_hw`` (h, w)."""
        padded = transform(copy.deepcopy(self.results))
        self.assertEqual(padded['img'].shape[:2], expected_hw)
        self.assertEqual(padded['gt_masks'].masks.shape[1:], expected_hw)

    def test_transform(self):
        # Fixed (width, height) target size.
        self._check_padded_shape(Pad(size=(1200, 2000)), (2000, 1200))
        # Pad each side up to a multiple of the divisor.
        self._check_padded_shape(Pad(size_divisor=11), (1342, 803))
        # Pad to a square of the longer edge.
        self._check_padded_shape(Pad(pad_to_square=True), (1333, 1333))
        # Square padding combined with a divisor, applied on two
        # independent copies.
        for _ in range(2):
            self._check_padded_shape(
                Pad(pad_to_square=True, size_divisor=11), (1342, 1342))

    def test_repr(self):
        transform = Pad(
            pad_to_square=True, size_divisor=11, padding_mode='edge')
        self.assertEqual(
            repr(transform),
            ('Pad(size=None, size_divisor=11, pad_to_square=True, '
             "pad_val={'img': 0, 'seg': 255}), padding_mode=edge)"))
class TestMinIoURandomCrop(unittest.TestCase):
    # Tests for MinIoURandomCrop. The crop is random, so the assertions
    # are conditional on the IoU mode the transform sampled for this call.
    def test_transform(self):
        results = dict()
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        results['img'] = img
        results['img_shape'] = img.shape[:2]
        # One random box inside the image, with a matching label.
        gt_bboxes = create_random_bboxes(1, results['img_shape'][1],
                                         results['img_shape'][0])
        results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
        results['gt_bboxes'] = gt_bboxes
        transform = MinIoURandomCrop()
        results = transform.transform(copy.deepcopy(results))
        # Labels and boxes must stay aligned and keep their dtypes.
        self.assertEqual(results['gt_labels'].shape[0],
                         results['gt_bboxes'].shape[0])
        self.assertEqual(results['gt_labels'].dtype, np.int64)
        self.assertEqual(results['gt_bboxes'].dtype, np.float32)
        self.assertEqual(results['img_shape'], results['img'].shape[:2])
        # IoUs between the whole output image and the surviving boxes.
        patch = np.array(
            [0, 0, results['img_shape'][1], results['img_shape'][0]])
        ious = bbox_overlaps(patch.reshape(-1, 4),
                             results['gt_bboxes']).reshape(-1)
        # NOTE(review): ``transform.mode`` appears to be the IoU mode
        # sampled during the call above, with 1 meaning "no crop" — confirm
        # against MinIoURandomCrop's implementation.
        mode = transform.mode
        if mode == 1:
            self.assertTrue(np.equal(results['gt_bboxes'], gt_bboxes).all())
        else:
            self.assertTrue((ious >= mode).all())
    def test_transform_use_box_type(self):
        # Same check with gt_bboxes wrapped in the HorizontalBoxes type.
        results = dict()
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        results['img'] = img
        results['img_shape'] = img.shape[:2]
        gt_bboxes = create_random_bboxes(1, results['img_shape'][1],
                                         results['img_shape'][0])
        results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
        results['gt_bboxes'] = HorizontalBoxes(gt_bboxes)
        transform = MinIoURandomCrop()
        results = transform.transform(copy.deepcopy(results))
        self.assertEqual(results['gt_labels'].shape[0],
                         results['gt_bboxes'].shape[0])
        self.assertEqual(results['gt_labels'].dtype, np.int64)
        # Box-type results are torch tensors, hence the torch dtype.
        self.assertEqual(results['gt_bboxes'].dtype, torch.float32)
        patch = np.array(
            [0, 0, results['img_shape'][1], results['img_shape'][0]])
        ious = bbox_overlaps(
            patch.reshape(-1, 4), results['gt_bboxes'].numpy()).reshape(-1)
        mode = transform.mode
        if mode == 1:
            self.assertTrue((results['gt_bboxes'].numpy() == gt_bboxes).all())
        else:
            self.assertTrue((ious >= mode).all())
    def test_repr(self):
        transform = MinIoURandomCrop()
        self.assertEqual(
            repr(transform), ('MinIoURandomCrop'
                              '(min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), '
                              'min_crop_size=0.3, '
                              'bbox_clip_border=True)'))
class TestPhotoMetricDistortion(unittest.TestCase):
    """Unit tests for the ``PhotoMetricDistortion`` color-jitter transform."""

    def test_transform(self):
        """The output image is float32 regardless of input dtype."""
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        transform = PhotoMetricDistortion()
        # Exercise both a uint8 and a float32 source image.
        for src in (img, img.astype(np.float32)):
            results = transform.transform(copy.deepcopy({'img': src}))
            self.assertEqual(results['img'].dtype, np.float32)

    def test_repr(self):
        """``repr`` lists all default jitter ranges."""
        expected = ('PhotoMetricDistortion'
                    '(brightness_delta=32, '
                    'contrast_range=(0.5, 1.5), '
                    'saturation_range=(0.5, 1.5), '
                    'hue_delta=18)')
        self.assertEqual(repr(PhotoMetricDistortion()), expected)
class TestExpand(unittest.TestCase):
    """Unit tests for the ``Expand`` canvas-enlarging transform."""

    def setUp(self):
        """Create one 224x224 sample shared by all test methods.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        img = np.random.random((224, 224, 3))
        masks = BitmapMasks(rng.rand(1, 224, 224), height=224, width=224)
        seg = np.random.random((224, 224))
        self.results = dict(
            img=img,
            img_shape=(224, 224),
            gt_bboxes=np.array([[0, 1, 100, 101]]),
            gt_masks=masks,
            gt_seg_map=seg)

    def test_transform(self):
        """Image, masks and seg map must all share the expanded shape."""
        out = Expand().transform(copy.deepcopy(self.results))
        self.assertEqual(out['img_shape'], out['img'].shape[:2])
        self.assertEqual(out['img_shape'],
                         (out['gt_masks'].height, out['gt_masks'].width))
        self.assertEqual(out['img_shape'], out['gt_seg_map'].shape)

    def test_transform_use_box_type(self):
        """Same invariants when boxes are wrapped in ``HorizontalBoxes``."""
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        out = Expand().transform(results)
        self.assertEqual(out['img_shape'],
                         (out['gt_masks'].height, out['gt_masks'].width))
        self.assertEqual(out['img_shape'], out['gt_seg_map'].shape)

    def test_repr(self):
        """``repr`` lists all default constructor arguments."""
        expected = ('Expand'
                    '(mean=(0, 0, 0), to_rgb=True, '
                    'ratio_range=(1, 4), '
                    'seg_ignore_label=None, '
                    'prob=0.5)')
        self.assertEqual(repr(Expand()), expected)
class TestSegRescale(unittest.TestCase):
    """Unit tests for the ``SegRescale`` segmentation-map transform."""

    def setUp(self) -> None:
        self.results = {
            'gt_seg_map':
            np.random.randint(0, 255, size=(32, 32), dtype=np.int32)
        }

    def test_transform(self):
        # A factor of 2 doubles both spatial dimensions.
        out = SegRescale(scale_factor=2)(copy.deepcopy(self.results))
        self.assertEqual(out['gt_seg_map'].shape[:2], (64, 64))
        # A factor of 1 leaves the map untouched.
        out = SegRescale(scale_factor=1)(copy.deepcopy(self.results))
        self.assertEqual(out['gt_seg_map'].shape[:2], (32, 32))

    def test_repr(self):
        self.assertEqual(
            repr(SegRescale(scale_factor=2)),
            'SegRescale(scale_factor=2, backend=cv2)')
class TestRandomCrop(unittest.TestCase):
    """Unit tests for the ``RandomCrop`` transform.

    Fix: removed two leftover debug ``print(...)`` statements that
    polluted test output on every run; assertions are unchanged.
    """

    def test_init(self):
        """Constructor argument validation for every crop_type."""
        # test invalid crop_type
        with self.assertRaisesRegex(ValueError, 'Invalid crop_type'):
            RandomCrop(crop_size=(10, 10), crop_type='unknown')

        crop_type_list = ['absolute', 'absolute_range']
        for crop_type in crop_type_list:
            # test h > 0 and w > 0
            for crop_size in [(0, 0), (0, 1), (1, 0)]:
                with self.assertRaises(AssertionError):
                    RandomCrop(crop_size=crop_size, crop_type=crop_type)
            # test type(h) = int and type(w) = int
            for crop_size in [(1.0, 1), (1, 1.0), (1.0, 1.0)]:
                with self.assertRaises(AssertionError):
                    RandomCrop(crop_size=crop_size, crop_type=crop_type)

        # test crop_size[0] <= crop_size[1]
        with self.assertRaises(AssertionError):
            RandomCrop(crop_size=(10, 5), crop_type='absolute_range')

        # test h in (0, 1] and w in (0, 1]
        crop_type_list = ['relative_range', 'relative']
        for crop_type in crop_type_list:
            for crop_size in [(0, 1), (1, 0), (1.1, 0.5), (0.5, 1.1)]:
                with self.assertRaises(AssertionError):
                    RandomCrop(crop_size=crop_size, crop_type=crop_type)

    def test_transform(self):
        """Crop sizes, annotation pruning and homography consistency
        with plain numpy boxes."""
        # test relative and absolute crop
        src_results = {
            'img': np.random.randint(0, 255, size=(24, 32), dtype=np.int32)
        }
        target_shape = (12, 16)
        for crop_type, crop_size in zip(['relative', 'absolute'], [(0.5, 0.5),
                                                                   (16, 12)]):
            transform = RandomCrop(crop_size=crop_size, crop_type=crop_type)
            results = transform(copy.deepcopy(src_results))
            self.assertEqual(results['img'].shape[:2], target_shape)

        # test absolute_range crop
        transform = RandomCrop(crop_size=(10, 20), crop_type='absolute_range')
        results = transform(copy.deepcopy(src_results))
        h, w = results['img'].shape
        self.assertTrue(10 <= w <= 20)
        self.assertTrue(10 <= h <= 20)
        self.assertEqual(results['img_shape'], results['img'].shape[:2])
        # test relative_range crop
        transform = RandomCrop(
            crop_size=(0.5, 0.5), crop_type='relative_range')
        results = transform(copy.deepcopy(src_results))
        h, w = results['img'].shape
        self.assertTrue(16 <= w <= 32)
        self.assertTrue(12 <= h <= 24)
        self.assertEqual(results['img_shape'], results['img'].shape[:2])

        # test with gt_bboxes, gt_bboxes_labels, gt_ignore_flags,
        # gt_masks, gt_seg_map
        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
        gt_bboxes = np.array([[0, 0, 7, 7], [2, 3, 9, 9]], dtype=np.float32)
        gt_bboxes_labels = np.array([0, 1], dtype=np.int64)
        gt_ignore_flags = np.array([0, 1], dtype=bool)
        gt_masks_ = np.zeros((2, 10, 10), np.uint8)
        gt_masks_[0, 0:7, 0:7] = 1
        gt_masks_[1, 2:7, 3:8] = 1
        gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)
        gt_seg_map = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
        src_results = {
            'img': img,
            'gt_bboxes': gt_bboxes,
            'gt_bboxes_labels': gt_bboxes_labels,
            'gt_ignore_flags': gt_ignore_flags,
            'gt_masks': gt_masks,
            'gt_seg_map': gt_seg_map
        }
        transform = RandomCrop(
            crop_size=(7, 5),
            allow_negative_crop=False,
            recompute_bbox=False,
            bbox_clip_border=True)
        results = transform(copy.deepcopy(src_results))
        h, w = results['img'].shape
        self.assertEqual(h, 5)
        self.assertEqual(w, 7)
        self.assertEqual(results['gt_bboxes'].shape[0], 2)
        self.assertEqual(results['gt_bboxes_labels'].shape[0], 2)
        self.assertEqual(results['gt_ignore_flags'].shape[0], 2)
        self.assertTupleEqual(results['gt_seg_map'].shape[:2], (5, 7))
        self.assertEqual(results['img_shape'], results['img'].shape[:2])

        # test geometric transformation with homography matrix:
        # projecting the original boxes through the recorded matrix
        # must reproduce the cropped boxes exactly.
        bboxes = copy.deepcopy(src_results['gt_bboxes'])
        self.assertTrue((bbox_project(bboxes, results['homography_matrix'],
                                      (5, 7)) == results['gt_bboxes']).all())

        # test recompute_bbox = True: empty masks shrink the box to zeros
        gt_masks_ = np.zeros((2, 10, 10), np.uint8)
        gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)
        gt_bboxes = np.array([[0.1, 0.1, 0.2, 0.2]])
        src_results = {
            'img': img,
            'gt_bboxes': gt_bboxes,
            'gt_masks': gt_masks
        }
        target_gt_bboxes = np.zeros((1, 4), dtype=np.float32)
        transform = RandomCrop(
            crop_size=(10, 11),
            allow_negative_crop=False,
            recompute_bbox=True,
            bbox_clip_border=True)
        results = transform(copy.deepcopy(src_results))
        self.assertTrue((results['gt_bboxes'] == target_gt_bboxes).all())

        # test bbox_clip_border = False: boxes pass through unchanged
        src_results = {'img': img, 'gt_bboxes': gt_bboxes}
        transform = RandomCrop(
            crop_size=(10, 11),
            allow_negative_crop=False,
            recompute_bbox=True,
            bbox_clip_border=False)
        results = transform(copy.deepcopy(src_results))
        self.assertTrue(
            (results['gt_bboxes'] == src_results['gt_bboxes']).all())

        # test the crop does not contain any gt-bbox
        # allow_negative_crop = False -> the transform returns None
        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
        gt_bboxes = np.zeros((0, 4), dtype=np.float32)
        src_results = {'img': img, 'gt_bboxes': gt_bboxes}
        transform = RandomCrop(crop_size=(5, 3), allow_negative_crop=False)
        results = transform(copy.deepcopy(src_results))
        self.assertIsNone(results)
        # allow_negative_crop = True -> an (empty) result dict is kept
        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
        gt_bboxes = np.zeros((0, 4), dtype=np.float32)
        src_results = {'img': img, 'gt_bboxes': gt_bboxes}
        transform = RandomCrop(crop_size=(5, 3), allow_negative_crop=True)
        results = transform(copy.deepcopy(src_results))
        self.assertTrue(isinstance(results, dict))

    def test_transform_use_box_type(self):
        """Same scenarios as ``test_transform`` but with
        ``HorizontalBoxes``-wrapped annotations."""
        # test with gt_bboxes, gt_bboxes_labels, gt_ignore_flags,
        # gt_masks, gt_seg_map
        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
        gt_bboxes = np.array([[0, 0, 7, 7], [2, 3, 9, 9]], dtype=np.float32)
        gt_bboxes_labels = np.array([0, 1], dtype=np.int64)
        gt_ignore_flags = np.array([0, 1], dtype=bool)
        gt_masks_ = np.zeros((2, 10, 10), np.uint8)
        gt_masks_[0, 0:7, 0:7] = 1
        gt_masks_[1, 2:7, 3:8] = 1
        gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)
        gt_seg_map = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
        src_results = {
            'img': img,
            'gt_bboxes': HorizontalBoxes(gt_bboxes),
            'gt_bboxes_labels': gt_bboxes_labels,
            'gt_ignore_flags': gt_ignore_flags,
            'gt_masks': gt_masks,
            'gt_seg_map': gt_seg_map
        }
        transform = RandomCrop(
            crop_size=(7, 5),
            allow_negative_crop=False,
            recompute_bbox=False,
            bbox_clip_border=True)
        results = transform(copy.deepcopy(src_results))
        h, w = results['img'].shape
        self.assertEqual(h, 5)
        self.assertEqual(w, 7)
        self.assertEqual(results['gt_bboxes'].shape[0], 2)
        self.assertEqual(results['gt_bboxes_labels'].shape[0], 2)
        self.assertEqual(results['gt_ignore_flags'].shape[0], 2)
        self.assertTupleEqual(results['gt_seg_map'].shape[:2], (5, 7))

        # test geometric transformation with homography matrix
        bboxes = copy.deepcopy(src_results['gt_bboxes'].numpy())
        self.assertTrue(
            (bbox_project(bboxes, results['homography_matrix'],
                          (5, 7)) == results['gt_bboxes'].numpy()).all())

        # test recompute_bbox = True
        gt_masks_ = np.zeros((2, 10, 10), np.uint8)
        gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)
        gt_bboxes = HorizontalBoxes(np.array([[0.1, 0.1, 0.2, 0.2]]))
        src_results = {
            'img': img,
            'gt_bboxes': gt_bboxes,
            'gt_masks': gt_masks
        }
        target_gt_bboxes = np.zeros((1, 4), dtype=np.float32)
        transform = RandomCrop(
            crop_size=(10, 11),
            allow_negative_crop=False,
            recompute_bbox=True,
            bbox_clip_border=True)
        results = transform(copy.deepcopy(src_results))
        self.assertTrue(
            (results['gt_bboxes'].numpy() == target_gt_bboxes).all())

        # test bbox_clip_border = False
        src_results = {'img': img, 'gt_bboxes': gt_bboxes}
        transform = RandomCrop(
            crop_size=(10, 10),
            allow_negative_crop=False,
            recompute_bbox=True,
            bbox_clip_border=False)
        results = transform(copy.deepcopy(src_results))
        self.assertTrue(
            (results['gt_bboxes'].numpy() == src_results['gt_bboxes'].numpy()
             ).all())

        # test the crop does not contain any gt-bbox
        # allow_negative_crop = False
        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
        gt_bboxes = HorizontalBoxes(np.zeros((0, 4), dtype=np.float32))
        src_results = {'img': img, 'gt_bboxes': gt_bboxes}
        transform = RandomCrop(crop_size=(5, 2), allow_negative_crop=False)
        results = transform(copy.deepcopy(src_results))
        self.assertIsNone(results)
        # allow_negative_crop = True
        img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
        gt_bboxes = HorizontalBoxes(np.zeros((0, 4), dtype=np.float32))
        src_results = {'img': img, 'gt_bboxes': gt_bboxes}
        transform = RandomCrop(crop_size=(5, 2), allow_negative_crop=True)
        results = transform(copy.deepcopy(src_results))
        self.assertTrue(isinstance(results, dict))

    def test_repr(self):
        """``repr`` reflects all constructor arguments."""
        crop_type = 'absolute'
        crop_size = (10, 5)
        allow_negative_crop = False
        recompute_bbox = True
        bbox_clip_border = False
        transform = RandomCrop(
            crop_size=crop_size,
            crop_type=crop_type,
            allow_negative_crop=allow_negative_crop,
            recompute_bbox=recompute_bbox,
            bbox_clip_border=bbox_clip_border)
        self.assertEqual(
            repr(transform),
            f'RandomCrop(crop_size={crop_size}, crop_type={crop_type}, '
            f'allow_negative_crop={allow_negative_crop}, '
            f'recompute_bbox={recompute_bbox}, '
            f'bbox_clip_border={bbox_clip_border})')
class TestCutOut(unittest.TestCase):
    """Unit tests for the ``CutOut`` region-dropout transform."""

    def setUp(self):
        """Load the shared color image once per test.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        self.results = {'img': img}

    def test_transform(self):
        """Constructor validation plus pixel-sum sanity checks."""
        # n_holes must be a positive int or an ascending 2-tuple.
        with self.assertRaises(AssertionError):
            CutOut(n_holes=(5, 3), cutout_shape=(8, 8))
        with self.assertRaises(AssertionError):
            CutOut(n_holes=(3, 4, 5), cutout_shape=(8, 8))
        # cutout_shape / cutout_ratio must be tuples (or lists thereof).
        with self.assertRaises(AssertionError):
            CutOut(n_holes=1, cutout_shape=8)
        with self.assertRaises(AssertionError):
            CutOut(n_holes=1, cutout_ratio=0.2)
        # Exactly one of cutout_shape and cutout_ratio must be supplied.
        with self.assertRaises(AssertionError):
            CutOut(n_holes=1)
        with self.assertRaises(AssertionError):
            CutOut(n_holes=1, cutout_shape=(2, 2), cutout_ratio=(0.4, 0.4))

        baseline = self.results['img'].sum()
        # Zero fill darkens the image.
        out = CutOut(n_holes=1, cutout_shape=(10, 10))(
            copy.deepcopy(self.results))
        self.assertTrue(out['img'].sum() < baseline)
        # White fill brightens it.
        out = CutOut(
            n_holes=(2, 4),
            cutout_shape=[(10, 10), (15, 15)],
            fill_in=(255, 255, 255))(
                copy.deepcopy(self.results))
        self.assertTrue(out['img'].sum() > baseline)
        out = CutOut(
            n_holes=1, cutout_ratio=(0.8, 0.8), fill_in=(255, 255, 255))(
                copy.deepcopy(self.results))
        self.assertTrue(out['img'].sum() > baseline)

    def test_repr(self):
        """``repr`` normalizes n_holes/shape/ratio to canonical form."""
        self.assertEqual(
            repr(CutOut(n_holes=1, cutout_shape=(10, 10))),
            ('CutOut(n_holes=(1, 1), '
             'cutout_shape=[(10, 10)], '
             'fill_in=(0, 0, 0))'))
        self.assertEqual(
            repr(
                CutOut(
                    n_holes=1,
                    cutout_ratio=(0.8, 0.8),
                    fill_in=(255, 255, 255))),
            ('CutOut(n_holes=(1, 1), '
             'cutout_ratio=[(0.8, 0.8)], '
             'fill_in=(255, 255, 255))'))
class TestMosaic(unittest.TestCase):
    """Unit tests for the 4-image ``Mosaic`` augmentation."""

    def setUp(self):
        """Create a 224x224 sample with boxes, labels, flags and masks.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
            'gt_masks': BitmapMasks(
                rng.rand(3, 224, 224), height=224, width=224),
        }

    def test_transform(self):
        """Validation plus output shape/dtype checks with numpy boxes."""
        # img_scale must be a tuple; prob must lie in [0, 1].
        with self.assertRaises(AssertionError):
            Mosaic(img_scale=640)
        with self.assertRaises(AssertionError):
            Mosaic(prob=1.5)

        transform = Mosaic(img_scale=(12, 10))
        # 'mix_results' must be attached before applying the transform.
        with self.assertRaises(AssertionError):
            transform(copy.deepcopy(self.results))

        self.results['mix_results'] = [copy.deepcopy(self.results)] * 3
        out = transform(copy.deepcopy(self.results))
        self.assertTrue(out['img'].shape[:2] == (20, 24))
        self.assertTrue(
            out['gt_bboxes_labels'].shape[0] == out['gt_bboxes'].shape[0])
        self.assertTrue(out['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(out['gt_bboxes'].dtype == np.float32)
        self.assertTrue(out['gt_ignore_flags'].dtype == bool)
        self.assertEqual(out['img_shape'], out['img'].shape[:2])

    def test_transform_with_no_gt(self):
        """Mosaic of four empty-annotation images stays empty but valid."""
        self.results['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)
        self.results['gt_bboxes_labels'] = np.empty((0, ), dtype=np.int64)
        self.results['gt_ignore_flags'] = np.empty((0, ), dtype=bool)
        transform = Mosaic(img_scale=(12, 10))
        self.results['mix_results'] = [copy.deepcopy(self.results)] * 3
        out = transform(copy.deepcopy(self.results))
        self.assertIsInstance(out, dict)
        self.assertTrue(out['img'].shape[:2] == (20, 24))
        self.assertTrue(
            out['gt_bboxes_labels'].shape[0] == out['gt_bboxes'].shape[0] ==
            out['gt_ignore_flags'].shape[0] == 0)
        self.assertTrue(out['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(out['gt_bboxes'].dtype == np.float32)
        self.assertTrue(out['gt_ignore_flags'].dtype == bool)

    def test_transform_use_box_type(self):
        """Boxed annotations come back as torch-backed boxes."""
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results['mix_results'] = [results] * 3
        out = Mosaic(img_scale=(12, 10))(results)
        self.assertTrue(out['img'].shape[:2] == (20, 24))
        self.assertTrue(
            out['gt_bboxes_labels'].shape[0] == out['gt_bboxes'].shape[0])
        self.assertTrue(out['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(out['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(out['gt_ignore_flags'].dtype == bool)

    def test_repr(self):
        expected = ('Mosaic(img_scale=(640, 640), '
                    'center_ratio_range=(0.5, 1.5), '
                    'pad_val=114.0, '
                    'prob=1.0)')
        self.assertEqual(repr(Mosaic(img_scale=(640, 640))), expected)
class TestMixUp(unittest.TestCase):
    """Unit tests for the ``MixUp`` augmentation."""

    def setUp(self):
        """Create a 224x224 sample with boxes, labels, flags and masks.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
            'gt_masks': BitmapMasks(
                rng.rand(3, 224, 224), height=224, width=224),
        }

    def test_transform(self):
        """Validation plus output shape/dtype checks with numpy boxes."""
        # img_scale must be a tuple.
        with self.assertRaises(AssertionError):
            MixUp(img_scale=640)

        transform = MixUp(img_scale=(12, 10))
        # 'mix_results' is mandatory and must contain exactly one sample.
        with self.assertRaises(AssertionError):
            transform(copy.deepcopy(self.results))
        with self.assertRaises(AssertionError):
            self.results['mix_results'] = [copy.deepcopy(self.results)] * 2
            transform(copy.deepcopy(self.results))

        self.results['mix_results'] = [copy.deepcopy(self.results)]
        out = transform(copy.deepcopy(self.results))
        self.assertTrue(out['img'].shape[:2] == (224, 224))
        self.assertTrue(
            out['gt_bboxes_labels'].shape[0] == out['gt_bboxes'].shape[0])
        self.assertTrue(out['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(out['gt_bboxes'].dtype == np.float32)
        self.assertTrue(out['gt_ignore_flags'].dtype == bool)
        self.assertEqual(out['img_shape'], out['img'].shape[:2])

    def test_transform_use_box_type(self):
        """Boxed annotations come back as torch-backed boxes."""
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results['mix_results'] = [results]
        out = MixUp(img_scale=(12, 10))(results)
        self.assertTrue(out['img'].shape[:2] == (224, 224))
        self.assertTrue(
            out['gt_bboxes_labels'].shape[0] == out['gt_bboxes'].shape[0])
        self.assertTrue(out['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(out['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(out['gt_ignore_flags'].dtype == bool)

    def test_repr(self):
        transform = MixUp(
            img_scale=(640, 640),
            ratio_range=(0.8, 1.6),
            pad_val=114.0,
        )
        expected = ('MixUp(dynamic_scale=(640, 640), '
                    'ratio_range=(0.8, 1.6), '
                    'flip_ratio=0.5, '
                    'pad_val=114.0, '
                    'max_iters=15, '
                    'bbox_clip_border=True)')
        self.assertEqual(repr(transform), expected)
class TestRandomAffine(unittest.TestCase):
    """Unit tests for the ``RandomAffine`` geometric augmentation."""

    def setUp(self):
        """Create a 224x224 sample with boxes, labels and ignore flags.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
        }

    def test_transform(self):
        """Validation plus output shape/dtype checks with numpy boxes."""
        # max_translate_ratio must not exceed 1.
        with self.assertRaises(AssertionError):
            RandomAffine(max_translate_ratio=1.5)
        # scaling_ratio_range must be ascending and strictly positive.
        with self.assertRaises(AssertionError):
            RandomAffine(scaling_ratio_range=(1.5, 0.5))
        with self.assertRaises(AssertionError):
            RandomAffine(scaling_ratio_range=(0, 0.5))

        out = RandomAffine()(copy.deepcopy(self.results))
        self.assertTrue(out['img'].shape[:2] == (224, 224))
        self.assertTrue(
            out['gt_bboxes_labels'].shape[0] == out['gt_bboxes'].shape[0])
        self.assertTrue(out['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(out['gt_bboxes'].dtype == np.float32)
        self.assertTrue(out['gt_ignore_flags'].dtype == bool)
        self.assertEqual(out['img_shape'], out['img'].shape[:2])

    def test_transform_use_box_type(self):
        """Boxed annotations come back as torch-backed boxes."""
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        out = RandomAffine()(copy.deepcopy(results))
        self.assertTrue(out['img'].shape[:2] == (224, 224))
        self.assertTrue(
            out['gt_bboxes_labels'].shape[0] == out['gt_bboxes'].shape[0])
        self.assertTrue(out['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(out['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(out['gt_ignore_flags'].dtype == bool)

    def test_repr(self):
        transform = RandomAffine(
            scaling_ratio_range=(0.1, 2),
            border=(-320, -320),
        )
        expected = ('RandomAffine(max_rotate_degree=10.0, '
                    'max_translate_ratio=0.1, '
                    'scaling_ratio_range=(0.1, 2), '
                    'max_shear_degree=2.0, '
                    'border=(-320, -320), '
                    'border_val=(114, 114, 114), '
                    'bbox_clip_border=True)')
        self.assertEqual(repr(transform), expected)
class TestYOLOXHSVRandomAug(unittest.TestCase):
    """Unit tests for the ``YOLOXHSVRandomAug`` color augmentation."""

    def setUp(self):
        """Load the shared color image and attach fixed annotations.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        self.results = {
            'img': img,
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
        }

    def test_transform(self):
        """HSV jitter keeps geometry and all annotations intact."""
        out = YOLOXHSVRandomAug()(copy.deepcopy(self.results))
        self.assertTrue(
            out['img'].shape[:2] == self.results['img'].shape[:2])
        self.assertTrue(
            out['gt_bboxes_labels'].shape[0] == out['gt_bboxes'].shape[0])
        self.assertTrue(out['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(out['gt_bboxes'].dtype == np.float32)
        self.assertTrue(out['gt_ignore_flags'].dtype == bool)

    def test_repr(self):
        expected = ('YOLOXHSVRandomAug(hue_delta=5, '
                    'saturation_delta=30, '
                    'value_delta=30)')
        self.assertEqual(repr(YOLOXHSVRandomAug()), expected)
class TestRandomCenterCropPad(unittest.TestCase):
    """Unit tests for ``RandomCenterCropPad`` in train and test modes."""

    def test_init(self):
        """Constructor argument validation for each mode combination."""
        # test assertion for invalid crop_size while test_mode=False
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=(-1, 0), test_mode=False, test_pad_mode=None)
        # test assertion for invalid ratios while test_mode=False
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=(511, 511),
                ratios=(1.0, 1.0),
                test_mode=False,
                test_pad_mode=None)
        # test assertion for invalid mean, std and to_rgb
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=(511, 511),
                mean=None,
                std=None,
                to_rgb=None,
                test_mode=False,
                test_pad_mode=None)
        # test assertion for invalid crop_size while test_mode=True
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=(511, 511),
                ratios=None,
                border=None,
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True,
                test_mode=True,
                test_pad_mode=('logical_or', 127))
        # test assertion for invalid ratios while test_mode=True
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=None,
                ratios=(0.9, 1.0, 1.1),
                border=None,
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True,
                test_mode=True,
                test_pad_mode=('logical_or', 127))
        # test assertion for invalid border while test_mode=True
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=None,
                ratios=None,
                border=128,
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True,
                test_mode=True,
                test_pad_mode=('logical_or', 127))
        # test assertion for invalid test_pad_mode while test_mode=True
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=None,
                ratios=None,
                border=None,
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True,
                test_mode=True,
                test_pad_mode=('do_nothing', 100))

    def test_transform(self):
        """Train-mode crop keeps all boxes; test-mode pads each side
        with the 'logical_or' rule (``size | 127``)."""
        results = dict(
            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))
        load = LoadImageFromFile(to_float32=True)
        results = load(results)
        test_results = copy.deepcopy(results)
        h, w = results['img_shape']
        gt_bboxes = create_random_bboxes(4, w, h)
        gt_bboxes_labels = np.array([1, 2, 3, 1], dtype=np.int64)
        gt_ignore_flags = np.array([0, 0, 1, 1], dtype=bool)
        results['gt_bboxes'] = gt_bboxes
        results['gt_bboxes_labels'] = gt_bboxes_labels
        results['gt_ignore_flags'] = gt_ignore_flags
        crop_module = RandomCenterCropPad(
            crop_size=(w - 20, h - 20),
            ratios=(1.0, ),
            border=128,
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            to_rgb=True,
            test_mode=False,
            test_pad_mode=None)
        train_results = crop_module(results)
        assert train_results['img'].shape[:2] == (h - 20, w - 20)
        # All bboxes should be reserved after crop
        assert train_results['img_shape'][:2] == (h - 20, w - 20)
        assert train_results['gt_bboxes'].shape[0] == 4
        assert train_results['gt_bboxes'].dtype == np.float32
        self.assertEqual(results['img_shape'], results['img'].shape[:2])
        crop_module = RandomCenterCropPad(
            crop_size=None,
            ratios=None,
            border=None,
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            to_rgb=True,
            test_mode=True,
            test_pad_mode=('logical_or', 127))
        test_results = crop_module(test_results)
        # 'logical_or' pads the image so each dimension becomes size | 127.
        assert test_results['img'].shape[:2] == (h | 127, w | 127)
        assert test_results['img_shape'][:2] == (h | 127, w | 127)
        assert 'border' in test_results

    def test_transform_use_box_type(self):
        """Same as ``test_transform`` with ``HorizontalBoxes`` input."""
        results = dict(
            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))
        load = LoadImageFromFile(to_float32=True)
        results = load(results)
        test_results = copy.deepcopy(results)
        h, w = results['img_shape']
        gt_bboxes = create_random_bboxes(4, w, h)
        gt_bboxes_labels = np.array([1, 2, 3, 1], dtype=np.int64)
        gt_ignore_flags = np.array([0, 0, 1, 1], dtype=bool)
        results['gt_bboxes'] = HorizontalBoxes(gt_bboxes)
        results['gt_bboxes_labels'] = gt_bboxes_labels
        results['gt_ignore_flags'] = gt_ignore_flags
        crop_module = RandomCenterCropPad(
            crop_size=(w - 20, h - 20),
            ratios=(1.0, ),
            border=128,
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            to_rgb=True,
            test_mode=False,
            test_pad_mode=None)
        train_results = crop_module(results)
        assert train_results['img'].shape[:2] == (h - 20, w - 20)
        # All bboxes should be reserved after crop
        assert train_results['img_shape'][:2] == (h - 20, w - 20)
        assert train_results['gt_bboxes'].shape[0] == 4
        assert train_results['gt_bboxes'].dtype == torch.float32
        crop_module = RandomCenterCropPad(
            crop_size=None,
            ratios=None,
            border=None,
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            to_rgb=True,
            test_mode=True,
            test_pad_mode=('logical_or', 127))
        test_results = crop_module(test_results)
        assert test_results['img'].shape[:2] == (h | 127, w | 127)
        assert test_results['img_shape'][:2] == (h | 127, w | 127)
        assert 'border' in test_results
class TestCopyPaste(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
h, w, _ = img.shape
dst_bboxes = np.array([[0.2 * w, 0.2 * h, 0.4 * w, 0.4 * h],
[0.5 * w, 0.5 * h, 0.6 * w, 0.6 * h]],
dtype=np.float32)
src_bboxes = np.array([[0.1 * w, 0.1 * h, 0.3 * w, 0.5 * h],
[0.4 * w, 0.4 * h, 0.7 * w, 0.7 * h],
[0.8 * w, 0.8 * h, 0.9 * w, 0.9 * h]],
dtype=np.float32)
self.dst_results = {
'img': img.copy(),
'gt_bboxes': dst_bboxes,
'gt_bboxes_labels': np.ones(dst_bboxes.shape[0], dtype=np.int64),
'gt_masks': create_full_masks(dst_bboxes, w, h),
'gt_ignore_flags': np.array([0, 1], dtype=bool),
}
self.src_results = {
'img': img.copy(),
'gt_bboxes': src_bboxes,
'gt_bboxes_labels':
np.ones(src_bboxes.shape[0], dtype=np.int64) * 2,
'gt_masks': create_full_masks(src_bboxes, w, h),
'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
}
    def test_transform(self):
        """CopyPaste pastes all source objects (selected=False) and prunes
        fully-occluded destination objects."""
        transform = CopyPaste(selected=False)
        # test assertion for invalid mix_results
        with self.assertRaises(AssertionError):
            results = transform(copy.deepcopy(self.dst_results))
        results = copy.deepcopy(self.dst_results)
        results['mix_results'] = [copy.deepcopy(self.src_results)]
        results = transform(results)
        self.assertEqual(results['img'].shape[:2],
                         self.dst_results['img'].shape[:2])
        # one object of destination image is totally occluded
        # -> counts are dst + src - 1 for every annotation field
        self.assertEqual(
            results['gt_bboxes'].shape[0],
            self.dst_results['gt_bboxes'].shape[0] +
            self.src_results['gt_bboxes'].shape[0] - 1)
        self.assertEqual(
            results['gt_bboxes_labels'].shape[0],
            self.dst_results['gt_bboxes_labels'].shape[0] +
            self.src_results['gt_bboxes_labels'].shape[0] - 1)
        self.assertEqual(
            results['gt_masks'].masks.shape[0],
            self.dst_results['gt_masks'].masks.shape[0] +
            self.src_results['gt_masks'].masks.shape[0] - 1)
        self.assertEqual(
            results['gt_ignore_flags'].shape[0],
            self.dst_results['gt_ignore_flags'].shape[0] +
            self.src_results['gt_ignore_flags'].shape[0] - 1)
        # the object of destination image is partially occluded
        # -> its mask shrinks but it survives one of the two thresholds
        ori_bbox = self.dst_results['gt_bboxes'][0]
        occ_bbox = results['gt_bboxes'][0]
        ori_mask = self.dst_results['gt_masks'].masks[0]
        occ_mask = results['gt_masks'].masks[0]
        self.assertTrue(ori_mask.sum() > occ_mask.sum())
        self.assertTrue(
            np.all(np.abs(occ_bbox - ori_bbox) <= transform.bbox_occluded_thr)
            or occ_mask.sum() > transform.mask_occluded_thr)
        # test copypaste with selected objects
        transform = CopyPaste()
        results = copy.deepcopy(self.dst_results)
        results['mix_results'] = [copy.deepcopy(self.src_results)]
        results = transform(results)
        # test copypaste with an empty source image
        # (all source annotations filtered out; must not crash)
        results = copy.deepcopy(self.dst_results)
        valid_inds = [False] * self.src_results['gt_bboxes'].shape[0]
        results['mix_results'] = [{
            'img':
            self.src_results['img'].copy(),
            'gt_bboxes':
            self.src_results['gt_bboxes'][valid_inds],
            'gt_bboxes_labels':
            self.src_results['gt_bboxes_labels'][valid_inds],
            'gt_masks':
            self.src_results['gt_masks'][valid_inds],
            'gt_ignore_flags':
            self.src_results['gt_ignore_flags'][valid_inds],
        }]
        results = transform(results)
    def test_transform_use_box_type(self):
        """CopyPaste must behave identically when ``gt_bboxes`` are boxlist
        objects (``HorizontalBoxes``) instead of plain ndarrays.

        Mirrors the ndarray-based transform test: paste all source objects,
        check annotation counts, occlusion behaviour, the ``selected`` code
        path, and an empty source image.
        """
        # wrap the ndarray boxes of both toy images in HorizontalBoxes
        src_results = copy.deepcopy(self.src_results)
        src_results['gt_bboxes'] = HorizontalBoxes(src_results['gt_bboxes'])
        dst_results = copy.deepcopy(self.dst_results)
        dst_results['gt_bboxes'] = HorizontalBoxes(dst_results['gt_bboxes'])

        # paste every source object (selected=False disables subsampling)
        transform = CopyPaste(selected=False)
        results = copy.deepcopy(dst_results)
        results['mix_results'] = [copy.deepcopy(src_results)]
        results = transform(results)
        # the output keeps the destination image's spatial size
        self.assertEqual(results['img'].shape[:2],
                         self.dst_results['img'].shape[:2])
        # one object of destination image is totally occluded
        self.assertEqual(
            results['gt_bboxes'].shape[0],
            self.dst_results['gt_bboxes'].shape[0] +
            self.src_results['gt_bboxes'].shape[0] - 1)
        self.assertEqual(
            results['gt_bboxes_labels'].shape[0],
            self.dst_results['gt_bboxes_labels'].shape[0] +
            self.src_results['gt_bboxes_labels'].shape[0] - 1)
        self.assertEqual(
            results['gt_masks'].masks.shape[0],
            self.dst_results['gt_masks'].masks.shape[0] +
            self.src_results['gt_masks'].masks.shape[0] - 1)
        self.assertEqual(
            results['gt_ignore_flags'].shape[0],
            self.dst_results['gt_ignore_flags'].shape[0] +
            self.src_results['gt_ignore_flags'].shape[0] - 1)
        # the object of destination image is partially occluded
        ori_bbox = dst_results['gt_bboxes'][0].numpy()
        occ_bbox = results['gt_bboxes'][0].numpy()
        ori_mask = dst_results['gt_masks'].masks[0]
        occ_mask = results['gt_masks'].masks[0]
        self.assertTrue(ori_mask.sum() > occ_mask.sum())
        # a partially-occluded object is kept if either its bbox barely moved
        # or enough of its mask survived
        self.assertTrue(
            np.all(np.abs(occ_bbox - ori_bbox) <= transform.bbox_occluded_thr)
            or occ_mask.sum() > transform.mask_occluded_thr)

        # test copypaste with selected objects
        transform = CopyPaste()
        results = copy.deepcopy(dst_results)
        results['mix_results'] = [copy.deepcopy(src_results)]
        results = transform(results)

        # test copypaste with an empty source image
        results = copy.deepcopy(dst_results)
        valid_inds = [False] * self.src_results['gt_bboxes'].shape[0]
        results['mix_results'] = [{
            'img':
            src_results['img'].copy(),
            'gt_bboxes':
            src_results['gt_bboxes'][valid_inds],
            'gt_bboxes_labels':
            src_results['gt_bboxes_labels'][valid_inds],
            'gt_masks':
            src_results['gt_masks'][valid_inds],
            'gt_ignore_flags':
            src_results['gt_ignore_flags'][valid_inds],
        }]
        results = transform(results)
def test_repr(self):
transform = CopyPaste()
self.assertEqual(
repr(transform), ('CopyPaste(max_num_pasted=100, '
'bbox_occluded_thr=10, '
'mask_occluded_thr=300, '
'selected=True)'))
class TestAlbu(unittest.TestCase):
    """Tests for the ``Albu`` wrapper around albumentations transforms."""

    @unittest.skipIf(albumentations is None, 'albumentations is not installed')
    def test_transform(self):
        """Run an Albu pipeline on a real image and on synthetic bbox data."""
        results = dict(
            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))
        # Define simple pipeline
        load = dict(type='LoadImageFromFile')
        load = TRANSFORMS.build(load)
        albu_transform = dict(
            type='Albu', transforms=[dict(type='ChannelShuffle', p=1)])
        albu_transform = TRANSFORMS.build(albu_transform)
        # Execute transforms
        results = load(results)
        results = albu_transform(results)
        # channel shuffling must not change the image dtype
        self.assertEqual(results['img'].dtype, np.uint8)

        # test bbox: Albu converts annotations via the keymap and must hand
        # them back with their original dtypes
        albu_transform = dict(
            type='Albu',
            transforms=[dict(type='ChannelShuffle', p=1)],
            bbox_params=dict(
                type='BboxParams',
                format='pascal_voc',
                label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
            keymap={
                'img': 'image',
                'gt_bboxes': 'bboxes'
            })
        albu_transform = TRANSFORMS.build(albu_transform)
        results = {
            'img':
            np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels':
            np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes':
            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                     dtype=np.float32),
            'gt_ignore_flags':
            np.array([0, 0, 1], dtype=bool),
        }
        results = albu_transform(results)
        # dtypes of the image and every annotation field survive round-trip
        self.assertEqual(results['img'].dtype, np.float64)
        self.assertEqual(results['gt_bboxes'].dtype, np.float32)
        self.assertEqual(results['gt_ignore_flags'].dtype, bool)
        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)
        self.assertEqual(results['img_shape'], results['img'].shape[:2])

    @unittest.skipIf(albumentations is None, 'albumentations is not installed')
    def test_repr(self):
        """The repr should expose the wrapped albumentations config."""
        albu_transform = dict(
            type='Albu', transforms=[dict(type='ChannelShuffle', p=1)])
        albu_transform = TRANSFORMS.build(albu_transform)
        self.assertEqual(
            repr(albu_transform), 'Albu(transforms=['
            '{\'type\': \'ChannelShuffle\', '
            '\'p\': 1}])')
class TestCorrupt(unittest.TestCase):
    """Tests for the ``Corrupt`` image-corruption transform."""

    def test_transform(self):
        """Corrupting a loaded image must preserve the uint8 dtype."""
        loader = TRANSFORMS.build(dict(type='LoadImageFromFile'))
        corrupt = TRANSFORMS.build(
            dict(type='Corrupt', corruption='gaussian_blur'))
        data = dict(
            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))
        # load the image, then apply the corruption on top of it
        data = corrupt(loader(data))
        self.assertEqual(data['img'].dtype, np.uint8)

    def test_repr(self):
        """The repr should list the corruption name and default severity."""
        corrupt = TRANSFORMS.build(
            dict(type='Corrupt', corruption='gaussian_blur'))
        self.assertEqual(
            repr(corrupt), 'Corrupt(corruption=gaussian_blur, severity=1)')
class TestRandomShift(unittest.TestCase):
    """Tests for the ``RandomShift`` transform."""

    def test_init(self):
        """Invalid constructor arguments must raise AssertionError."""
        # the shift probability must lie in [0, 1]
        with self.assertRaises(AssertionError):
            RandomShift(prob=1.5)
        # the maximum shift in pixels must be non-negative
        with self.assertRaises(AssertionError):
            RandomShift(max_shift_px=-1)

    def _run_shift(self, use_box_type):
        """Apply ``RandomShift(prob=1.0)`` to the toy image and run the
        checks shared by the ndarray and boxlist variants."""
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        h, w, _ = img.shape
        bboxes = create_random_bboxes(8, w, h)
        results = dict(
            img=img,
            gt_bboxes=HorizontalBoxes(bboxes) if use_box_type else bboxes,
            gt_bboxes_labels=np.ones(bboxes.shape[0], dtype=np.int64))
        results = RandomShift(prob=1.0)(results)
        # shifting never changes the image size
        self.assertEqual(results['img'].shape[:2], (h, w))
        # boxes and labels stay aligned after filtering
        self.assertEqual(results['gt_bboxes_labels'].shape[0],
                         results['gt_bboxes'].shape[0])
        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)
        return results

    def test_transform(self):
        results = self._run_shift(use_box_type=False)
        self.assertEqual(results['gt_bboxes'].dtype, np.float32)

    def test_transform_use_box_type(self):
        results = self._run_shift(use_box_type=True)
        # boxlist boxes are torch tensors, hence a torch dtype
        self.assertEqual(results['gt_bboxes'].dtype, torch.float32)

    def test_repr(self):
        expected = ('RandomShift(prob=0.5, '
                    'max_shift_px=32, '
                    'filter_thr_px=1)')
        self.assertEqual(repr(RandomShift()), expected)
class TestRandomErasing(unittest.TestCase):
    """Tests for the ``RandomErasing`` transform."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.results = construct_toy_data(poly2mask=True)

    def test_transform(self):
        """Erasing with value 0 must darken; erasing with 255 must brighten."""
        transform = RandomErasing(
            n_patches=(1, 5), ratio=(0.4, 0.8), img_border_value=0)
        results = transform(copy.deepcopy(self.results))
        # patches are filled with 0, so the pixel sum must drop
        self.assertTrue(results['img'].sum() < self.results['img'].sum())

        transform = RandomErasing(
            n_patches=1, ratio=0.999, img_border_value=255)
        results = transform(copy.deepcopy(self.results))
        # nearly the whole image is filled with 255, so the sum must grow
        self.assertTrue(results['img'].sum() > self.results['img'].sum())

        # test empty results
        empty_results = copy.deepcopy(self.results)
        empty_results['gt_bboxes'] = np.zeros((0, 4), dtype=np.float32)
        empty_results['gt_bboxes_labels'] = np.zeros((0, ), dtype=np.int64)
        empty_results['gt_masks'] = empty_results['gt_masks'][False]
        empty_results['gt_ignore_flags'] = np.zeros((0, ), dtype=bool)
        empty_results['gt_seg_map'] = np.ones_like(
            empty_results['gt_seg_map']) * 255
        results = transform(copy.deepcopy(empty_results))
        self.assertTrue(results['img'].sum() > self.results['img'].sum())

    def test_transform_use_box_type(self):
        """Same checks as ``test_transform`` with ``HorizontalBoxes`` input."""
        src_results = copy.deepcopy(self.results)
        src_results['gt_bboxes'] = HorizontalBoxes(src_results['gt_bboxes'])
        transform = RandomErasing(
            n_patches=(1, 5), ratio=(0.4, 0.8), img_border_value=0)
        results = transform(copy.deepcopy(src_results))
        self.assertTrue(results['img'].sum() < src_results['img'].sum())

        transform = RandomErasing(
            n_patches=1, ratio=0.999, img_border_value=255)
        results = transform(copy.deepcopy(src_results))
        self.assertTrue(results['img'].sum() > src_results['img'].sum())

        # test empty results
        empty_results = copy.deepcopy(src_results)
        empty_results['gt_bboxes'] = HorizontalBoxes([], dtype=torch.float32)
        empty_results['gt_bboxes_labels'] = np.zeros((0, ), dtype=np.int64)
        empty_results['gt_masks'] = empty_results['gt_masks'][False]
        empty_results['gt_ignore_flags'] = np.zeros((0, ), dtype=bool)
        empty_results['gt_seg_map'] = np.ones_like(
            empty_results['gt_seg_map']) * 255
        results = transform(copy.deepcopy(empty_results))
        self.assertTrue(results['img'].sum() > src_results['img'].sum())

    def test_repr(self):
        transform = RandomErasing(n_patches=(1, 5), ratio=(0, 0.2))
        self.assertEqual(
            repr(transform), ('RandomErasing(n_patches=(1, 5), '
                              'ratio=(0, 0.2), '
                              'squared=True, '
                              'bbox_erased_thr=0.9, '
                              'img_border_value=128, '
                              'mask_border_value=0, '
                              'seg_ignore_label=255)'))
| 72,142 | 41.139603 | 79 |
py
|
ERD
|
ERD-main/tests/test_datasets/test_transforms/test_geometric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import unittest
import numpy as np
from mmdet.datasets.transforms import (GeomTransform, Rotate, ShearX, ShearY,
TranslateX, TranslateY)
from mmdet.structures.bbox import HorizontalBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks
from .utils import check_result_same, construct_toy_data
class TestGeomTransform(unittest.TestCase):
    """Tests for the ``GeomTransform`` base class (argument validation and
    the no-op ``prob=0`` path)."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)
        self.img_border_value = (104, 116, 124)
        self.seg_ignore_label = 255

    def test_geomtransform(self):
        # test assertion for invalid prob
        with self.assertRaises(AssertionError):
            transform = GeomTransform(
                prob=-0.5, level=1, min_mag=0.0, max_mag=1.0)
        # test assertion for invalid value of level
        with self.assertRaises(AssertionError):
            transform = GeomTransform(
                prob=0.5, level=-1, min_mag=0.0, max_mag=1.0)
        # test assertion for invalid value of min_mag and max_mag
        # (min_mag must not exceed max_mag).
        # NOTE: fixed a copy-paste error -- this case previously constructed
        # ShearX instead of the GeomTransform base class under test.
        with self.assertRaises(AssertionError):
            transform = GeomTransform(
                prob=0.5, level=2, min_mag=1.0, max_mag=0.0)
        # test assertion for the num of elements in tuple img_border_value
        with self.assertRaises(AssertionError):
            transform = GeomTransform(
                prob=0.5,
                level=1,
                min_mag=0.0,
                max_mag=1.0,
                img_border_value=(128, 128, 128, 128))
        # test ValueError for invalid type of img_border_value
        with self.assertRaises(ValueError):
            transform = GeomTransform(
                prob=0.5,
                level=1,
                min_mag=0.0,
                max_mag=1.0,
                img_border_value=[128, 128, 128])
        # test assertion for invalid value of img_border_value
        with self.assertRaises(AssertionError):
            transform = GeomTransform(
                prob=0.5,
                level=1,
                min_mag=0.0,
                max_mag=1.0,
                img_border_value=(128, -1, 256))

        # test case when no aug (prob=0): the input must pass through
        # completely unchanged
        transform = GeomTransform(
            prob=0.,
            level=10,
            min_mag=0.0,
            max_mag=1.0,
            img_border_value=self.img_border_value)
        results_wo_aug = transform(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, results_wo_aug, self.check_keys)

    def test_repr(self):
        transform = GeomTransform(
            prob=0.5,
            level=5,
            min_mag=0.0,
            max_mag=1.0,
        )
        self.assertEqual(
            repr(transform), ('GeomTransform(prob=0.5, '
                              'level=5, '
                              'min_mag=0.0, '
                              'max_mag=1.0, '
                              'reversal_prob=0.5, '
                              'img_border_value=(128.0, 128.0, 128.0), '
                              'mask_border_value=0, '
                              'seg_ignore_label=255, '
                              'interpolation=bilinear)'))
class TestShearX(unittest.TestCase):
    """Tests for the ``ShearX`` (horizontal shear) transform."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)
        self.results_poly = construct_toy_data(poly2mask=False)
        self.results_mask_boxtype = construct_toy_data(
            poly2mask=True, use_box_type=True)
        self.img_border_value = (104, 116, 124)
        self.seg_ignore_label = 255

    def test_shearx(self):
        """Check ShearX on the 3x4 toy sample, Bitmap and Polygon masks."""
        # test assertion for invalid value of min_mag
        with self.assertRaises(AssertionError):
            transform = ShearX(prob=0.5, level=2, min_mag=-30.)
        # test assertion for invalid value of max_mag
        with self.assertRaises(AssertionError):
            transform = ShearX(prob=0.5, level=2, max_mag=100.)

        # test case when no shear horizontally (level=0)
        transform = ShearX(
            prob=1.0,
            level=0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label,
        )
        results_wo_shearx = transform(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, results_wo_shearx,
                          self.check_keys)

        # test shear horizontally, magnitude=-1
        transform = ShearX(
            prob=1.0,
            level=10,
            max_mag=45.,
            reversal_prob=1.0,
            img_border_value=self.img_border_value)
        results_sheared = transform(copy.deepcopy(self.results_mask))
        results_gt = copy.deepcopy(self.results_mask)
        img_gt = np.array([[1, 2, 3, 4], [0, 5, 6, 7], [0, 0, 9, 10]],
                          dtype=np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        # pixels sheared out of view are filled with the border value
        img_gt[1, 0, :] = np.array(self.img_border_value)
        img_gt[2, 0, :] = np.array(self.img_border_value)
        img_gt[2, 1, :] = np.array(self.img_border_value)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = np.array([[1, 0, 4, 2]], dtype=np.float32)
        results_gt['gt_bboxes_labels'] = np.array([13], dtype=np.int64)
        gt_masks = np.array([[0, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[255, 13, 255, 255], [255, 255, 13, 13], [255, 255, 255, 13]],
            dtype=self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_sheared, self.check_keys)

        # test PolygonMasks with shear horizontally, magnitude=1
        results_sheared = transform(copy.deepcopy(self.results_poly))
        gt_masks = [[np.array([3, 2, 1, 0, 3, 1], dtype=np.float32)]]
        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)
        check_result_same(results_gt, results_sheared, self.check_keys)

    def test_shearx_use_box_type(self):
        """Same checks as ``test_shearx`` with ``HorizontalBoxes`` inputs."""
        # test case when no shear horizontally (level=0)
        transform = ShearX(
            prob=1.0,
            level=0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label,
        )
        results_wo_shearx = transform(copy.deepcopy(self.results_mask_boxtype))
        check_result_same(self.results_mask_boxtype, results_wo_shearx,
                          self.check_keys)

        # test shear horizontally, magnitude=-1
        transform = ShearX(
            prob=1.0,
            level=10,
            max_mag=45.,
            reversal_prob=1.0,
            img_border_value=self.img_border_value)
        results_sheared = transform(copy.deepcopy(self.results_mask_boxtype))
        results_gt = copy.deepcopy(self.results_mask_boxtype)
        img_gt = np.array([[1, 2, 3, 4], [0, 5, 6, 7], [0, 0, 9, 10]],
                          dtype=np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        img_gt[1, 0, :] = np.array(self.img_border_value)
        img_gt[2, 0, :] = np.array(self.img_border_value)
        img_gt[2, 1, :] = np.array(self.img_border_value)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = HorizontalBoxes(
            np.array([[1, 0, 4, 2]], dtype=np.float32))
        results_gt['gt_bboxes_labels'] = np.array([13], dtype=np.int64)
        gt_masks = np.array([[0, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[255, 13, 255, 255], [255, 255, 13, 13], [255, 255, 255, 13]],
            dtype=self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_sheared, self.check_keys)

    def test_repr(self):
        transform = ShearX(prob=0.5, level=10)
        self.assertEqual(
            repr(transform), ('ShearX(prob=0.5, '
                              'level=10, '
                              'min_mag=0.0, '
                              'max_mag=30.0, '
                              'reversal_prob=0.5, '
                              'img_border_value=(128.0, 128.0, 128.0), '
                              'mask_border_value=0, '
                              'seg_ignore_label=255, '
                              'interpolation=bilinear)'))
class TestShearY(unittest.TestCase):
    """Tests for the ``ShearY`` (vertical shear) transform."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)
        self.results_poly = construct_toy_data(poly2mask=False)
        self.results_mask_boxtype = construct_toy_data(
            poly2mask=True, use_box_type=True)
        self.img_border_value = (104, 116, 124)
        self.seg_ignore_label = 255

    def test_sheary(self):
        """Check ShearY on the 3x4 toy sample, Bitmap and Polygon masks."""
        # test assertion for invalid value of min_mag
        with self.assertRaises(AssertionError):
            transform = ShearY(prob=0.5, level=2, min_mag=-30.)
        # test assertion for invalid value of max_mag
        with self.assertRaises(AssertionError):
            transform = ShearY(prob=0.5, level=2, max_mag=100.)

        # test case when no shear vertically (level=0)
        transform = ShearY(
            prob=1.0,
            level=0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label,
        )
        results_wo_sheary = transform(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, results_wo_sheary,
                          self.check_keys)

        # test shear vertically, magnitude=1
        transform = ShearY(prob=1., level=10, max_mag=45., reversal_prob=0.)
        results_sheared = transform(copy.deepcopy(self.results_mask))
        results_gt = copy.deepcopy(self.results_mask)
        # sheared-out pixels take the default border value 128
        img_gt = np.array(
            [[1, 6, 11, 128], [5, 10, 128, 128], [9, 128, 128, 128]],
            dtype=np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = np.array([[1, 0, 2, 1]], dtype=np.float32)
        results_gt['gt_bboxes_labels'] = np.array([13], dtype=np.int64)
        gt_masks = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[255, 13, 255, 255], [255, 13, 255, 255], [255, 255, 255, 255]],
            dtype=self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_sheared, self.check_keys)

        # test PolygonMasks with shear vertically, magnitude=-1
        results_sheared = transform(copy.deepcopy(self.results_poly))
        gt_masks = [[np.array([1, 1, 1, 0, 2, 0], dtype=np.float32)]]
        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)
        check_result_same(results_gt, results_sheared, self.check_keys)

    def test_sheary_use_box_type(self):
        """Same checks as ``test_sheary`` with ``HorizontalBoxes`` inputs."""
        # test case when no shear vertically (level=0)
        transform = ShearY(
            prob=1.0,
            level=0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label,
        )
        results_wo_sheary = transform(copy.deepcopy(self.results_mask_boxtype))
        check_result_same(self.results_mask_boxtype, results_wo_sheary,
                          self.check_keys)

        # test shear vertically, magnitude=1
        transform = ShearY(prob=1., level=10, max_mag=45., reversal_prob=0.)
        results_sheared = transform(copy.deepcopy(self.results_mask_boxtype))
        results_gt = copy.deepcopy(self.results_mask_boxtype)
        img_gt = np.array(
            [[1, 6, 11, 128], [5, 10, 128, 128], [9, 128, 128, 128]],
            dtype=np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = HorizontalBoxes(
            np.array([[1, 0, 2, 1]], dtype=np.float32))
        results_gt['gt_bboxes_labels'] = np.array([13], dtype=np.int64)
        gt_masks = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[255, 13, 255, 255], [255, 13, 255, 255], [255, 255, 255, 255]],
            dtype=self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_sheared, self.check_keys)

    def test_repr(self):
        transform = ShearY(prob=0.5, level=10)
        self.assertEqual(
            repr(transform), ('ShearY(prob=0.5, '
                              'level=10, '
                              'min_mag=0.0, '
                              'max_mag=30.0, '
                              'reversal_prob=0.5, '
                              'img_border_value=(128.0, 128.0, 128.0), '
                              'mask_border_value=0, '
                              'seg_ignore_label=255, '
                              'interpolation=bilinear)'))
class TestRotate(unittest.TestCase):
    """Tests for the ``Rotate`` geometric transform."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)
        self.results_poly = construct_toy_data(poly2mask=False)
        self.results_mask_boxtype = construct_toy_data(
            poly2mask=True, use_box_type=True)
        self.img_border_value = (104, 116, 124)
        self.seg_ignore_label = 255

    def test_rotate(self):
        """Check Rotate on the 3x4 toy sample, Bitmap and Polygon masks."""
        # test assertion for invalid value of min_mag.
        # NOTE: fixed a copy-paste error -- these two assertions previously
        # constructed ShearY instead of the Rotate transform under test.
        with self.assertRaises(AssertionError):
            transform = Rotate(prob=0.5, level=2, min_mag=-90.0)
        # test assertion for invalid value of max_mag
        with self.assertRaises(AssertionError):
            transform = Rotate(prob=0.5, level=2, max_mag=270.0)

        # test case when no rotate aug (level=0)
        transform = Rotate(
            prob=1.,
            level=0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label,
        )
        results_wo_rotate = transform(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, results_wo_rotate,
                          self.check_keys)

        # test clockwise rotation with angle 90
        transform = Rotate(
            prob=1.,
            level=10,
            max_mag=90.0,
            # set reversal_prob to 1 for clockwise rotation
            reversal_prob=1.,
        )
        results_rotated = transform(copy.deepcopy(self.results_mask))
        # The image, masks, and semantic segmentation map
        # will be bilinearly interpolated.
        img_gt = np.array([[69, 8, 4, 65], [69, 9, 5, 65],
                           [70, 10, 6, 66]]).astype(np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        results_gt = copy.deepcopy(self.results_mask)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = np.array([[0.5, 0.5, 2.5, 1.5]],
                                           dtype=np.float32)
        gt_masks = np.array([[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[255, 13, 13, 13], [255, 255, 13, 255],
             [255, 255, 255,
              255]]).astype(self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_rotated, self.check_keys)

        # test clockwise rotation with angle 90, PolygonMasks.
        # NOTE: np.float was removed in NumPy 1.24; use np.float32 like the
        # other polygon fixtures in this file.
        results_rotated = transform(copy.deepcopy(self.results_poly))
        gt_masks = [[np.array([0, 1, 0, 1, 0, 2], dtype=np.float32)]]
        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)
        check_result_same(results_gt, results_rotated, self.check_keys)

        # test counter-clockwise rotation with angle 90
        transform = Rotate(
            prob=1.0,
            level=10,
            max_mag=90.0,
            # set reversal_prob to 0 for counter-clockwise rotation
            reversal_prob=0.0,
        )
        results_rotated = transform(copy.deepcopy(self.results_mask))
        # The image, masks, and semantic segmentation map
        # will be bilinearly interpolated.
        img_gt = np.array([[66, 6, 10, 70], [65, 5, 9, 69],
                           [65, 4, 8, 69]]).astype(np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        results_gt = copy.deepcopy(self.results_mask)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = np.array([[0.5, 0.5, 2.5, 1.5]],
                                           dtype=np.float32)
        gt_masks = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[255, 255, 255, 255], [255, 13, 255, 255],
             [13, 13, 13, 255]]).astype(self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_rotated, self.check_keys)

        # test counter-clockwise rotation with angle 90, PolygonMasks
        results_rotated = transform(copy.deepcopy(self.results_poly))
        gt_masks = [[np.array([2, 0, 0, 0, 1, 0], dtype=np.float32)]]
        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)
        check_result_same(results_gt, results_rotated, self.check_keys)

    def test_rotate_use_box_type(self):
        """Same checks as ``test_rotate`` with ``HorizontalBoxes`` inputs."""
        # test case when no rotate aug (level=0)
        transform = Rotate(
            prob=1.,
            level=0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label,
        )
        results_wo_rotate = transform(copy.deepcopy(self.results_mask_boxtype))
        check_result_same(self.results_mask_boxtype, results_wo_rotate,
                          self.check_keys)

        # test clockwise rotation with angle 90
        transform = Rotate(
            prob=1.,
            level=10,
            max_mag=90.0,
            # set reversal_prob to 1 for clockwise rotation
            reversal_prob=1.,
        )
        results_rotated = transform(copy.deepcopy(self.results_mask_boxtype))
        # The image, masks, and semantic segmentation map
        # will be bilinearly interpolated.
        img_gt = np.array([[69, 8, 4, 65], [69, 9, 5, 65],
                           [70, 10, 6, 66]]).astype(np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        results_gt = copy.deepcopy(self.results_mask_boxtype)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = HorizontalBoxes(
            np.array([[0.5, 0.5, 2.5, 1.5]], dtype=np.float32))
        gt_masks = np.array([[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[255, 13, 13, 13], [255, 255, 13, 255],
             [255, 255, 255,
              255]]).astype(self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_rotated, self.check_keys)

        # test counter-clockwise rotation with angle 90
        transform = Rotate(
            prob=1.0,
            level=10,
            max_mag=90.0,
            # set reversal_prob to 0 for counter-clockwise rotation
            reversal_prob=0.0,
        )
        results_rotated = transform(copy.deepcopy(self.results_mask_boxtype))
        # The image, masks, and semantic segmentation map
        # will be bilinearly interpolated.
        img_gt = np.array([[66, 6, 10, 70], [65, 5, 9, 69],
                           [65, 4, 8, 69]]).astype(np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        results_gt = copy.deepcopy(self.results_mask_boxtype)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = HorizontalBoxes(
            np.array([[0.5, 0.5, 2.5, 1.5]], dtype=np.float32))
        gt_masks = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[255, 255, 255, 255], [255, 13, 255, 255],
             [13, 13, 13, 255]]).astype(self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_rotated, self.check_keys)

    def test_repr(self):
        transform = Rotate(prob=0.5, level=5)
        self.assertEqual(
            repr(transform), ('Rotate(prob=0.5, '
                              'level=5, '
                              'min_mag=0.0, '
                              'max_mag=30.0, '
                              'reversal_prob=0.5, '
                              'img_border_value=(128.0, 128.0, 128.0), '
                              'mask_border_value=0, '
                              'seg_ignore_label=255, '
                              'interpolation=bilinear)'))
class TestTranslateX(unittest.TestCase):
    """Tests for the ``TranslateX`` (horizontal translation) transform."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)
        self.results_poly = construct_toy_data(poly2mask=False)
        self.results_mask_boxtype = construct_toy_data(
            poly2mask=True, use_box_type=True)
        self.img_border_value = (104, 116, 124)
        self.seg_ignore_label = 255

    def test_translatex(self):
        """Check TranslateX on the 3x4 toy sample, Bitmap/Polygon masks."""
        # test assertion for invalid value of min_mag
        with self.assertRaises(AssertionError):
            transform = TranslateX(prob=0.5, level=2, min_mag=-1.)
        # test assertion for invalid value of max_mag
        with self.assertRaises(AssertionError):
            transform = TranslateX(prob=0.5, level=2, max_mag=1.1)

        # test case when level=0 (without translate aug)
        transform = TranslateX(
            prob=1.0,
            level=0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label)
        results_wo_translatex = transform(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, results_wo_translatex,
                          self.check_keys)

        # test translate horizontally, magnitude=-1
        transform = TranslateX(
            prob=1.0,
            level=10,
            max_mag=0.3,
            reversal_prob=0.0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label)
        results_translated = transform(copy.deepcopy(self.results_mask))
        img_gt = np.array([[2, 3, 4, 0], [6, 7, 8, 0], [10, 11, 12,
                                                        0]]).astype(np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        # the column shifted into view is filled with the border value
        img_gt[:, 3, :] = np.array(self.img_border_value)
        results_gt = copy.deepcopy(self.results_mask)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = np.array([[0, 0, 1, 2]], dtype=np.float32)
        gt_masks = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 0, 0, 0]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[13, 255, 255, 255], [13, 13, 255, 255],
             [13, 255, 255,
              255]]).astype(self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_translated, self.check_keys)

        # test PolygonMasks with translate horizontally.
        results_translated = transform(copy.deepcopy(self.results_poly))
        gt_masks = [[np.array([0, 2, 0, 0, 1, 1], dtype=np.float32)]]
        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)
        check_result_same(results_gt, results_translated, self.check_keys)

    def test_translatex_use_box_type(self):
        """Same checks as ``test_translatex`` with ``HorizontalBoxes``."""
        # test case when level=0 (without translate aug)
        transform = TranslateX(
            prob=1.0,
            level=0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label)
        results_wo_translatex = transform(
            copy.deepcopy(self.results_mask_boxtype))
        check_result_same(self.results_mask_boxtype, results_wo_translatex,
                          self.check_keys)

        # test translate horizontally, magnitude=-1
        transform = TranslateX(
            prob=1.0,
            level=10,
            max_mag=0.3,
            reversal_prob=0.0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label)
        results_translated = transform(
            copy.deepcopy(self.results_mask_boxtype))
        img_gt = np.array([[2, 3, 4, 0], [6, 7, 8, 0], [10, 11, 12,
                                                        0]]).astype(np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        img_gt[:, 3, :] = np.array(self.img_border_value)
        results_gt = copy.deepcopy(self.results_mask)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = HorizontalBoxes(
            np.array([[0, 0, 1, 2]], dtype=np.float32))
        gt_masks = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 0, 0, 0]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[13, 255, 255, 255], [13, 13, 255, 255],
             [13, 255, 255,
              255]]).astype(self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_translated, self.check_keys)

    def test_repr(self):
        transform = TranslateX(prob=0.5, level=5)
        self.assertEqual(
            repr(transform), ('TranslateX(prob=0.5, '
                              'level=5, '
                              'min_mag=0.0, '
                              'max_mag=0.1, '
                              'reversal_prob=0.5, '
                              'img_border_value=(128.0, 128.0, 128.0), '
                              'mask_border_value=0, '
                              'seg_ignore_label=255, '
                              'interpolation=bilinear)'))
class TestTranslateY(unittest.TestCase):
    """Unit tests for the ``TranslateY`` (vertical translation) transform."""

    def setUp(self):
        """Build the shared toy samples used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)
        self.results_poly = construct_toy_data(poly2mask=False)
        self.results_mask_boxtype = construct_toy_data(
            poly2mask=True, use_box_type=True)
        self.img_border_value = (104, 116, 124)
        self.seg_ignore_label = 255

    def test_translatey(self):
        # test assertion for invalid value of min_mag
        with self.assertRaises(AssertionError):
            transform = TranslateY(prob=0.5, level=2, min_mag=-1.0)
        # test assertion for invalid value of max_mag
        with self.assertRaises(AssertionError):
            transform = TranslateY(prob=0.5, level=2, max_mag=1.1)
        # test case when level=0 (without translate aug)
        transform = TranslateY(
            prob=1.0,
            level=0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label)
        results_wo_translatey = transform(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, results_wo_translatey,
                          self.check_keys)
        # test translate vertically, magnitude=-1
        # (reversal_prob=0.0 forces the negative direction)
        transform = TranslateY(
            prob=1.0,
            level=10,
            max_mag=0.4,
            reversal_prob=0.0,
            seg_ignore_label=self.seg_ignore_label)
        results_translated = transform(copy.deepcopy(self.results_mask))
        img_gt = np.array([[5, 6, 7, 8], [9, 10, 11, 12],
                           [128, 128, 128, 128]]).astype(np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        results_gt = copy.deepcopy(self.results_mask)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = np.array([[1, 0, 2, 1]], dtype=np.float32)
        gt_masks = np.array([[0, 1, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[255, 13, 13, 255], [255, 13, 255, 255],
             [255, 255, 255,
              255]]).astype(self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_translated, self.check_keys)
        # test PolygonMasks with translate vertically.
        results_translated = transform(copy.deepcopy(self.results_poly))
        gt_masks = [[np.array([1, 1, 1, 0, 2, 0], dtype=np.float32)]]
        results_gt['gt_masks'] = PolygonMasks(gt_masks, 3, 4)
        check_result_same(results_gt, results_translated, self.check_keys)

    def test_translatey_use_box_type(self):
        # test case when level=0 (without translate aug)
        transform = TranslateY(
            prob=1.0,
            level=0,
            img_border_value=self.img_border_value,
            seg_ignore_label=self.seg_ignore_label)
        results_wo_translatey = transform(
            copy.deepcopy(self.results_mask_boxtype))
        check_result_same(self.results_mask_boxtype, results_wo_translatey,
                          self.check_keys)
        # test translate vertically, magnitude=-1
        transform = TranslateY(
            prob=1.0,
            level=10,
            max_mag=0.4,
            reversal_prob=0.0,
            seg_ignore_label=self.seg_ignore_label)
        results_translated = transform(
            copy.deepcopy(self.results_mask_boxtype))
        img_gt = np.array([[5, 6, 7, 8], [9, 10, 11, 12],
                           [128, 128, 128, 128]]).astype(np.uint8)
        img_gt = np.stack([img_gt, img_gt, img_gt], axis=-1)
        results_gt = copy.deepcopy(self.results_mask_boxtype)
        results_gt['img'] = img_gt
        results_gt['gt_bboxes'] = HorizontalBoxes(
            np.array([[1, 0, 2, 1]], dtype=np.float32))
        gt_masks = np.array([[0, 1, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
                            dtype=np.uint8)[None, :, :]
        results_gt['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
        results_gt['gt_ignore_flags'] = np.array(np.array([1], dtype=bool))
        results_gt['gt_seg_map'] = np.array(
            [[255, 13, 13, 255], [255, 13, 255, 255],
             [255, 255, 255,
              255]]).astype(self.results_mask['gt_seg_map'].dtype)
        check_result_same(results_gt, results_translated, self.check_keys)

    def test_repr(self):
        # BUGFIX: this test previously instantiated (and expected the repr
        # of) TranslateX — copy-pasted from TestTranslateX.test_repr. It must
        # exercise TranslateY.
        transform = TranslateY(prob=0.5, level=5)
        self.assertEqual(
            repr(transform), ('TranslateY(prob=0.5, '
                              'level=5, '
                              'min_mag=0.0, '
                              'max_mag=0.1, '
                              'reversal_prob=0.5, '
                              'img_border_value=(128.0, 128.0, 128.0), '
                              'mask_border_value=0, '
                              'seg_ignore_label=255, '
                              'interpolation=bilinear)'))
| 34,115 | 44.306773 | 79 |
py
|
ERD
|
ERD-main/tests/test_apis/test_inference.py
|
import os
from pathlib import Path
import numpy as np
import pytest
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
# TODO: Waiting to fix multiple call error bug
register_all_modules()
@pytest.mark.parametrize('config,devices',
                         [('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
                           ('cpu', 'cuda'))])
def test_init_detector(config, devices):
    """Smoke-test ``init_detector`` with str/Path configs and cfg_options."""
    assert all(device in ('cpu', 'cuda') for device in devices)

    repo_root = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    repo_root = os.path.join(repo_root, '..')
    config_file = os.path.join(repo_root, config)

    # Overrides applied on top of the config before building the model.
    cfg_options = dict(
        model=dict(
            backbone=dict(
                depth=18,
                init_cfg=dict(
                    type='Pretrained', checkpoint='torchvision://resnet18'))))

    for device in devices:
        if device == 'cuda' and not torch.cuda.is_available():
            pytest.skip('test requires GPU and torch+cuda')

        # A plain-string config path with cfg_options must work.
        model = init_detector(
            config_file, device=device, cfg_options=cfg_options)

        # A pathlib.Path config must be accepted as well.
        model = init_detector(Path(config_file), device=device)

        # Anything else (e.g. a list) must be rejected with TypeError.
        with pytest.raises(TypeError):
            model = init_detector([config_file])  # noqa: F841
@pytest.mark.parametrize('config,devices',
                         [('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
                           ('cpu', 'cuda'))])
def test_inference_detector(config, devices):
    """Run single- and multi-image inference on every requested device."""
    assert all(device in ('cpu', 'cuda') for device in devices)

    repo_root = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    repo_root = os.path.join(repo_root, '..')
    config_file = os.path.join(repo_root, config)

    # Two deterministic random images (seeded RNG keeps the test stable).
    rng = np.random.RandomState(0)
    img1 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
    img2 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)

    for device in devices:
        if device == 'cuda' and not torch.cuda.is_available():
            pytest.skip('test requires GPU and torch+cuda')
        model = init_detector(config_file, device=device)

        # A single ndarray yields a single DetDataSample.
        single = inference_detector(model, img1)
        assert isinstance(single, DetDataSample)

        # A list of ndarrays yields a list of the same length.
        batched = inference_detector(model, [img1, img2])
        assert isinstance(batched, list) and len(batched) == 2
| 2,737 | 34.558442 | 78 |
py
|
ERD
|
ERD-main/tests/test_apis/test_det_inferencer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase, mock
from unittest.mock import Mock, patch
import mmcv
import mmengine
import numpy as np
import torch
from mmengine.structures import InstanceData
from mmengine.utils import is_list_of
from parameterized import parameterized
from mmdet.apis import DetInferencer
from mmdet.evaluation.functional import get_classes
from mmdet.structures import DetDataSample
class TestDetInferencer(TestCase):
    """Tests for the high-level :class:`DetInferencer` API.

    ``mmengine.infer.infer._load_checkpoint`` is mocked out everywhere, so no
    weights are downloaded; models run with random weights and outputs are
    only compared for internal consistency, not accuracy.
    """

    @mock.patch('mmengine.infer.infer._load_checkpoint', return_value=None)
    def test_init(self, mock):
        # NOTE(review): the parameter shadows the imported ``mock`` module
        # inside this method; harmless here since the module is not used.
        # init from metafile
        DetInferencer('rtmdet-t')
        # init from cfg
        DetInferencer('configs/yolox/yolox_tiny_8xb8-300e_coco.py')

    def assert_predictions_equal(self, preds1, preds2):
        # Field-wise comparison helper. Boxes/scores use np.allclose with a
        # loose relative tolerance of 0.1 (third positional arg is rtol).
        for pred1, pred2 in zip(preds1, preds2):
            if 'bboxes' in pred1:
                self.assertTrue(
                    np.allclose(pred1['bboxes'], pred2['bboxes'], 0.1))
            if 'scores' in pred1:
                self.assertTrue(
                    np.allclose(pred1['scores'], pred2['scores'], 0.1))
            if 'labels' in pred1:
                self.assertTrue(np.allclose(pred1['labels'], pred2['labels']))
            if 'panoptic_seg_path' in pred1:
                self.assertTrue(
                    pred1['panoptic_seg_path'] == pred2['panoptic_seg_path'])

    @parameterized.expand([
        'rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'
    ])
    def test_call(self, model):
        # Path input, ndarray input, lists of both, and a directory input
        # must all produce consistent predictions.
        # single img
        img_path = 'tests/data/color.jpg'
        mock_load = Mock(return_value=None)
        with patch('mmengine.infer.infer._load_checkpoint', mock_load):
            inferencer = DetInferencer(model)
        # In the case of not loading the pretrained weight, the category
        # defaults to COCO 80, so it needs to be replaced.
        if model == 'panoptic_fpn_r50_fpn_1x_coco':
            inferencer.visualizer.dataset_meta = {
                'classes': get_classes('coco_panoptic'),
                'palette': 'random'
            }
        res_path = inferencer(img_path, return_vis=True)
        # ndarray
        img = mmcv.imread(img_path)
        res_ndarray = inferencer(img, return_vis=True)
        self.assert_predictions_equal(res_path['predictions'],
                                      res_ndarray['predictions'])
        self.assertIn('visualization', res_path)
        self.assertIn('visualization', res_ndarray)
        # multiple images
        img_paths = ['tests/data/color.jpg', 'tests/data/gray.jpg']
        res_path = inferencer(img_paths, return_vis=True)
        # list of ndarray
        imgs = [mmcv.imread(p) for p in img_paths]
        res_ndarray = inferencer(imgs, return_vis=True)
        self.assert_predictions_equal(res_path['predictions'],
                                      res_ndarray['predictions'])
        self.assertIn('visualization', res_path)
        self.assertIn('visualization', res_ndarray)
        # img dir, test different batch sizes
        img_dir = 'tests/data/VOCdevkit/VOC2007/JPEGImages/'
        res_bs1 = inferencer(img_dir, batch_size=1, return_vis=True)
        res_bs3 = inferencer(img_dir, batch_size=3, return_vis=True)
        self.assert_predictions_equal(res_bs1['predictions'],
                                      res_bs3['predictions'])
        # There is a jitter operation when the mask is drawn,
        # so it cannot be asserted.
        if model == 'rtmdet-t':
            for res_bs1_vis, res_bs3_vis in zip(res_bs1['visualization'],
                                                res_bs3['visualization']):
                self.assertTrue(np.allclose(res_bs1_vis, res_bs3_vis))

    @parameterized.expand([
        'rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'
    ])
    def test_visualize(self, model):
        # With out_dir set, one visualization file per input image must be
        # written under <out_dir>/vis/.
        img_paths = ['tests/data/color.jpg', 'tests/data/gray.jpg']
        mock_load = Mock(return_value=None)
        with patch('mmengine.infer.infer._load_checkpoint', mock_load):
            inferencer = DetInferencer(model)
        # In the case of not loading the pretrained weight, the category
        # defaults to COCO 80, so it needs to be replaced.
        if model == 'panoptic_fpn_r50_fpn_1x_coco':
            inferencer.visualizer.dataset_meta = {
                'classes': get_classes('coco_panoptic'),
                'palette': 'random'
            }
        with tempfile.TemporaryDirectory() as tmp_dir:
            inferencer(img_paths, out_dir=tmp_dir)
            for img_dir in ['color.jpg', 'gray.jpg']:
                self.assertTrue(osp.exists(osp.join(tmp_dir, 'vis', img_dir)))

    @parameterized.expand([
        'rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'
    ])
    def test_postprocess(self, model):
        # return_datasample
        img_path = 'tests/data/color.jpg'
        mock_load = Mock(return_value=None)
        with patch('mmengine.infer.infer._load_checkpoint', mock_load):
            inferencer = DetInferencer(model)
        # In the case of not loading the pretrained weight, the category
        # defaults to COCO 80, so it needs to be replaced.
        if model == 'panoptic_fpn_r50_fpn_1x_coco':
            inferencer.visualizer.dataset_meta = {
                'classes': get_classes('coco_panoptic'),
                'palette': 'random'
            }
        res = inferencer(img_path, return_datasample=True)
        self.assertTrue(is_list_of(res['predictions'], DetDataSample))
        # Saved JSON predictions must round-trip to the in-memory result.
        with tempfile.TemporaryDirectory() as tmp_dir:
            res = inferencer(img_path, out_dir=tmp_dir, no_save_pred=False)
            dumped_res = mmengine.load(
                osp.join(tmp_dir, 'preds', 'color.json'))
            self.assertEqual(res['predictions'][0], dumped_res)

    @mock.patch('mmengine.infer.infer._load_checkpoint', return_value=None)
    def test_pred2dict(self, mock):
        # pred2dict must convert a DetDataSample's instances (ndarray or
        # tensor fields) into plain lists.
        data_sample = DetDataSample()
        data_sample.pred_instances = InstanceData()
        data_sample.pred_instances.bboxes = np.array([[0, 0, 1, 1]])
        data_sample.pred_instances.labels = np.array([0])
        data_sample.pred_instances.scores = torch.FloatTensor([0.9])
        res = DetInferencer('rtmdet-t').pred2dict(data_sample)
        self.assertListAlmostEqual(res['bboxes'], [[0, 0, 1, 1]])
        self.assertListAlmostEqual(res['labels'], [0])
        self.assertListAlmostEqual(res['scores'], [0.9])

    def assertListAlmostEqual(self, list1, list2, places=7):
        # Recursive elementwise almost-equal comparison for nested lists.
        for i in range(len(list1)):
            if isinstance(list1[i], list):
                self.assertListAlmostEqual(list1[i], list2[i], places=places)
            else:
                self.assertAlmostEqual(list1[i], list2[i], places=places)
| 6,856 | 40.307229 | 79 |
py
|
ERD
|
ERD-main/tests/test_utils/test_setup_env.py
|
import datetime
import sys
from unittest import TestCase
from mmengine import DefaultScope
from mmdet.utils import register_all_modules
class TestSetupEnv(TestCase):
    """Tests for ``mmdet.utils.register_all_modules``."""

    def test_register_all_modules(self):
        """Registration must work with and without default-scope init.

        Before each sub-case the dataset modules are popped from
        ``sys.modules`` and ``CocoDataset`` is removed from the registry so
        that ``register_all_modules`` starts from an unregistered state.
        """
        from mmdet.registry import DATASETS
        # not init default scope
        sys.modules.pop('mmdet.datasets', None)
        sys.modules.pop('mmdet.datasets.coco', None)
        DATASETS._module_dict.pop('CocoDataset', None)
        self.assertFalse('CocoDataset' in DATASETS.module_dict)
        register_all_modules(init_default_scope=False)
        self.assertTrue('CocoDataset' in DATASETS.module_dict)
        # init default scope
        sys.modules.pop('mmdet.datasets')
        sys.modules.pop('mmdet.datasets.coco')
        DATASETS._module_dict.pop('CocoDataset', None)
        self.assertFalse('CocoDataset' in DATASETS.module_dict)
        register_all_modules(init_default_scope=True)
        self.assertTrue('CocoDataset' in DATASETS.module_dict)
        self.assertEqual(DefaultScope.get_current_instance().scope_name,
                         'mmdet')
        # init default scope when another scope is init
        # (a unique name avoids clashing with instances from earlier runs)
        name = f'test-{datetime.datetime.now()}'
        DefaultScope.get_instance(name, scope_name='test')
        with self.assertWarnsRegex(
                Warning, 'The current default scope "test" is not "mmdet"'):
            register_all_modules(init_default_scope=True)
| 1,426 | 35.589744 | 76 |
py
|
ERD
|
ERD-main/tests/test_utils/test_memory.py
|
import numpy as np
import pytest
import torch
from mmdet.utils import AvoidOOM
from mmdet.utils.memory import cast_tensor_type
def test_avoidoom():
    """Smoke-test ``AvoidOOM.retry_if_cuda_oom`` on a small matmul.

    With CUDA available, the wrapped op is exercised in its default
    configuration, with ``test=True`` (forced fallback path), and with
    ``to_cpu=False``; without CUDA only the default path is checked.
    """
    tensor = torch.from_numpy(np.random.random((20, 20)))
    if torch.cuda.is_available():
        tensor = tensor.cuda()
        # get default result
        default_result = torch.mm(tensor, tensor.transpose(1, 0))
        # when not occurred OOM error
        AvoidCudaOOM = AvoidOOM()
        result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,
                                                          tensor.transpose(
                                                              1, 0))
        assert default_result.device == result.device and \
            default_result.dtype == result.dtype and \
            torch.equal(default_result, result)
        # calculate with fp16 and convert back to source type
        # (allclose with rtol=1e-3: the fp16 round trip loses precision)
        AvoidCudaOOM = AvoidOOM(test=True)
        result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,
                                                          tensor.transpose(
                                                              1, 0))
        assert default_result.device == result.device and \
            default_result.dtype == result.dtype and \
            torch.allclose(default_result, result, 1e-3)
        # calculate on cpu and convert back to source device
        # NOTE(review): constructed identically to the fp16 case above —
        # presumably relies on ``to_cpu`` defaulting to True; confirm.
        AvoidCudaOOM = AvoidOOM(test=True)
        result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,
                                                          tensor.transpose(
                                                              1, 0))
        assert result.dtype == default_result.dtype and \
            result.device == default_result.device and \
            torch.allclose(default_result, result)
        # do not calculate on cpu and the outputs will be same as input
        AvoidCudaOOM = AvoidOOM(test=True, to_cpu=False)
        result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,
                                                          tensor.transpose(
                                                              1, 0))
        assert result.dtype == default_result.dtype and \
            result.device == default_result.device
    else:
        # CPU-only fallback: the wrapper must be a transparent pass-through.
        default_result = torch.mm(tensor, tensor.transpose(1, 0))
        AvoidCudaOOM = AvoidOOM()
        result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,
                                                          tensor.transpose(
                                                              1, 0))
        assert default_result.device == result.device and \
            default_result.dtype == result.dtype and \
            torch.equal(default_result, result)
def test_cast_tensor_type():
    """Exercise ``cast_tensor_type`` on scalars, tensors and containers."""
    inputs = torch.rand(10)
    if torch.cuda.is_available():
        inputs = inputs.cuda()

    # src_type and dst_type may not both be omitted.
    with pytest.raises(AssertionError):
        cast_tensor_type(inputs, src_type=None, dst_type=None)

    # A plain float passes through untouched.
    scalar_out = cast_tensor_type(10., dst_type=torch.half)
    assert isinstance(scalar_out, float) and scalar_out == 10.

    # Tensor: fp32 -> fp16 -> fp32 round trip.
    half_out = cast_tensor_type(inputs, dst_type=torch.half)
    assert half_out.dtype == torch.half
    float_out = cast_tensor_type(half_out, dst_type=torch.float32)
    assert float_out.dtype == torch.float32

    # Lists are converted element-wise.
    list_input = [inputs, inputs]
    list_outs = cast_tensor_type(list_input, dst_type=torch.half)
    assert isinstance(list_outs, list)
    assert len(list_outs) == len(list_input)
    for item in list_outs:
        assert item.dtype == torch.half

    # Dicts are converted value-wise.
    dict_input = {'test1': inputs, 'test2': inputs}
    dict_outs = cast_tensor_type(dict_input, dst_type=torch.half)
    assert isinstance(dict_outs, dict)
    assert len(dict_outs) == len(dict_input)

    # Device casts: GPU -> CPU and back to the source device.
    if torch.cuda.is_available():
        cpu_device = torch.empty(0).device
        gpu_device = inputs.device
        cpu_out = cast_tensor_type(inputs, dst_type=cpu_device)
        assert cpu_out.device == cpu_device
        gpu_out = cast_tensor_type(inputs, dst_type=gpu_device)
        assert gpu_out.device == gpu_device
| 4,261 | 42.050505 | 75 |
py
|
ERD
|
ERD-main/tests/test_utils/test_replace_cfg_vals.py
|
import os.path as osp
import tempfile
from copy import deepcopy
import pytest
from mmengine.config import Config
from mmdet.utils import replace_cfg_vals
def test_replace_cfg_vals():
    """``replace_cfg_vals`` must expand ``${...}`` references in a config.

    Covers: nested key lookup (``${model.train_cfg...}``), whole-object
    substitution (``${model}``), in-string interpolation of plain strings
    (``xxx${str}xxx``), and the assertion that non-string values (dict,
    list, tuple) cannot be interpolated into the middle of a string.
    """
    # A real file must exist because Config keeps its filename.
    temp_file = tempfile.NamedTemporaryFile()
    cfg_path = f'{temp_file.name}.py'
    with open(cfg_path, 'w') as f:
        f.write('configs')
    ori_cfg_dict = dict()
    ori_cfg_dict['cfg_name'] = osp.basename(temp_file.name)
    ori_cfg_dict['work_dir'] = 'work_dirs/${cfg_name}/${percent}/${fold}'
    ori_cfg_dict['percent'] = 5
    ori_cfg_dict['fold'] = 1
    # '${model}' below is replaced by the whole model dict.
    ori_cfg_dict['model_wrapper'] = dict(
        type='SoftTeacher', detector='${model}')
    ori_cfg_dict['model'] = dict(
        type='FasterRCNN',
        backbone=dict(type='ResNet'),
        neck=dict(type='FPN'),
        rpn_head=dict(type='RPNHead'),
        roi_head=dict(type='StandardRoIHead'),
        train_cfg=dict(
            rpn=dict(
                assigner=dict(type='MaxIoUAssigner'),
                sampler=dict(type='RandomSampler'),
            ),
            rpn_proposal=dict(nms=dict(type='nms', iou_threshold=0.7)),
            rcnn=dict(
                assigner=dict(type='MaxIoUAssigner'),
                sampler=dict(type='RandomSampler'),
            ),
        ),
        test_cfg=dict(
            rpn=dict(nms=dict(type='nms', iou_threshold=0.7)),
            rcnn=dict(nms=dict(type='nms', iou_threshold=0.5)),
        ),
    )
    # Deeply nested key references.
    ori_cfg_dict['iou_threshold'] = dict(
        rpn_proposal_nms='${model.train_cfg.rpn_proposal.nms.iou_threshold}',
        test_rpn_nms='${model.test_cfg.rpn.nms.iou_threshold}',
        test_rcnn_nms='${model.test_cfg.rcnn.nms.iou_threshold}',
    )
    ori_cfg_dict['str'] = 'Hello, world!'
    ori_cfg_dict['dict'] = {'Hello': 'world!'}
    ori_cfg_dict['list'] = [
        'Hello, world!',
    ]
    ori_cfg_dict['tuple'] = ('Hello, world!', )
    ori_cfg_dict['test_str'] = 'xxx${str}xxx'
    ori_cfg = Config(ori_cfg_dict, filename=cfg_path)
    updated_cfg = replace_cfg_vals(deepcopy(ori_cfg))
    assert updated_cfg.work_dir \
        == f'work_dirs/{osp.basename(temp_file.name)}/5/1'
    assert updated_cfg.model.detector == ori_cfg.model
    assert updated_cfg.iou_threshold.rpn_proposal_nms \
        == ori_cfg.model.train_cfg.rpn_proposal.nms.iou_threshold
    assert updated_cfg.test_str == 'xxxHello, world!xxx'
    # Interpolating a non-string value inside a string must raise.
    ori_cfg_dict['test_dict'] = 'xxx${dict}xxx'
    ori_cfg_dict['test_list'] = 'xxx${list}xxx'
    ori_cfg_dict['test_tuple'] = 'xxx${tuple}xxx'
    with pytest.raises(AssertionError):
        cfg = deepcopy(ori_cfg)
        cfg['test_dict'] = 'xxx${dict}xxx'
        updated_cfg = replace_cfg_vals(cfg)
    with pytest.raises(AssertionError):
        cfg = deepcopy(ori_cfg)
        cfg['test_list'] = 'xxx${list}xxx'
        updated_cfg = replace_cfg_vals(cfg)
    with pytest.raises(AssertionError):
        cfg = deepcopy(ori_cfg)
        cfg['test_tuple'] = 'xxx${tuple}xxx'
        updated_cfg = replace_cfg_vals(cfg)
| 2,985 | 34.547619 | 77 |
py
|
ERD
|
ERD-main/tests/test_utils/test_benchmark.py
|
import copy
import os
import tempfile
import unittest
import torch
from mmengine import Config, MMLogger
from mmengine.dataset import Compose
from mmengine.model import BaseModel
from torch.utils.data import Dataset
from mmdet.registry import DATASETS, MODELS
from mmdet.utils import register_all_modules
from mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,
InferenceBenchmark)
@MODELS.register_module()
class ToyDetector(BaseModel):
    """Minimal detector stub: accepts any arguments, computes nothing."""

    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, *args, **kwargs):
        return None
@DATASETS.register_module()
class ToyDataset(Dataset):
    """Tiny in-memory dataset: 12 random 2-d points with all-ones labels."""

    METAINFO = dict()  # type: ignore
    data = torch.randn(12, 2)
    label = torch.ones(12)

    def __init__(self):
        # Identity pipeline — benchmark code expects the attribute to exist.
        self.pipeline = Compose([lambda x: x])

    def __len__(self):
        return len(self.data)

    def get_data_info(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])

    def __getitem__(self, index):
        # Same payload as get_data_info.
        return self.get_data_info(index)
@DATASETS.register_module()
class ToyFullInitDataset(Dataset):
    """Like ``ToyDataset`` but also exposes a no-op ``full_init`` hook."""

    METAINFO = dict()  # type: ignore
    data = torch.randn(12, 2)
    label = torch.ones(12)

    def __init__(self):
        # Identity pipeline — benchmark code expects the attribute to exist.
        self.pipeline = Compose([lambda x: x])

    def __len__(self):
        return len(self.data)

    def get_data_info(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])

    def full_init(self):
        return None

    def __getitem__(self, index):
        # Same payload as get_data_info.
        return self.get_data_info(index)
class TestInferenceBenchmark(unittest.TestCase):
    """Tests for :class:`InferenceBenchmark` (model fps measurement)."""

    def setUp(self) -> None:
        register_all_modules()
        # Minimal config: toy detector + toy test dataloader.
        self.cfg = Config(
            dict(
                model=dict(type='ToyDetector'),
                test_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=3,
                    num_workers=1),
                env_cfg=dict(dist_cfg=dict(backend='nccl'))))
        self.max_iter = 10
        self.log_interval = 5

    @unittest.skipIf(not torch.cuda.is_available(),
                     'test requires GPU and torch+cuda')
    def test_init_and_run(self):
        # The benchmark loads a checkpoint file, so dump toy weights first.
        checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')
        torch.save(ToyDetector().state_dict(), checkpoint_path)
        cfg = copy.deepcopy(self.cfg)
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        results = inference_benchmark.run()
        self.assertTrue(isinstance(results, dict))
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)
        # The benchmark must force batch_size=1 and num_workers=0.
        self.assertTrue(inference_benchmark.data_loader.num_workers == 0)
        self.assertTrue(inference_benchmark.data_loader.batch_size == 1)
        results = inference_benchmark.run(1)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)
        self.assertTrue(inference_benchmark.data_loader.num_workers == 0)
        self.assertTrue(inference_benchmark.data_loader.batch_size == 1)
        # test repeat: run(3) yields one fps entry per repeat
        results = inference_benchmark.run(3)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 3)
        # test cudnn_benchmark
        cfg = copy.deepcopy(self.cfg)
        cfg.env_cfg.cudnn_benchmark = True
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        inference_benchmark.run(1)
        # test mp_cfg
        cfg = copy.deepcopy(self.cfg)
        cfg.env_cfg.cudnn_benchmark = True
        cfg.env_cfg.mp_cfg = {
            'mp_start_method': 'fork',
            'opencv_num_threads': 1
        }
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        inference_benchmark.run(1)
        # test fp16
        cfg = copy.deepcopy(self.cfg)
        cfg.fp16 = True
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        inference_benchmark.run(1)
        # test logger: benchmark output must be appended to the log file
        logger = MMLogger.get_instance(
            'mmdet', log_file='temp.log', log_level='INFO')
        inference_benchmark = InferenceBenchmark(
            cfg,
            checkpoint_path,
            False,
            False,
            self.max_iter,
            self.log_interval,
            logger=logger)
        inference_benchmark.run(1)
        self.assertTrue(os.path.exists('temp.log'))
        os.remove(checkpoint_path)
        os.remove('temp.log')
class TestDataLoaderBenchmark(unittest.TestCase):
    """Tests for :class:`DataLoaderBenchmark` (dataloader fps measurement)."""

    def setUp(self) -> None:
        register_all_modules()
        # Three dataloaders with distinct batch_size/num_workers so the
        # test can verify which one each ``dataset_type`` selects.
        self.cfg = Config(
            dict(
                model=dict(type='ToyDetector'),
                train_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=True),
                    batch_size=2,
                    num_workers=1),
                val_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=1,
                    num_workers=2),
                test_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=3,
                    num_workers=1),
                env_cfg=dict(dist_cfg=dict(backend='nccl'))))
        self.max_iter = 5
        self.log_interval = 1
        self.num_warmup = 1

    def test_init_and_run(self):
        cfg = copy.deepcopy(self.cfg)
        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'train',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        results = dataloader_benchmark.run(1)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)
        # 'train' must pick train_dataloader's settings (bs=2, workers=1).
        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 1)
        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 2)
        # test repeat: run(3) yields one fps entry per repeat
        results = dataloader_benchmark.run(3)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 3)
        # test dataset_type input parameters error
        with self.assertRaises(AssertionError):
            DataLoaderBenchmark(cfg, False, 'training', self.max_iter,
                                self.log_interval, self.num_warmup)
        # 'val' must pick val_dataloader's settings (bs=1, workers=2).
        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'val',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 2)
        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 1)
        # 'test' must pick test_dataloader's settings (bs=3, workers=1).
        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'test',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 1)
        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 3)
        # test mp_cfg
        cfg = copy.deepcopy(self.cfg)
        cfg.env_cfg.mp_cfg = {
            'mp_start_method': 'fork',
            'opencv_num_threads': 1
        }
        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'train',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        dataloader_benchmark.run(1)
class TestDatasetBenchmark(unittest.TestCase):
    """Tests for :class:`DatasetBenchmark` (dataset-only fps measurement)."""

    def setUp(self) -> None:
        register_all_modules()
        # Note: unlike the dataloader benchmark config, no env_cfg is needed.
        self.cfg = Config(
            dict(
                model=dict(type='ToyDetector'),
                train_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=True),
                    batch_size=2,
                    num_workers=1),
                val_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=1,
                    num_workers=2),
                test_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=3,
                    num_workers=1)))
        self.max_iter = 5
        self.log_interval = 1
        self.num_warmup = 1

    def test_init_and_run(self):
        cfg = copy.deepcopy(self.cfg)
        dataset_benchmark = DatasetBenchmark(cfg, 'train', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        results = dataset_benchmark.run(1)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)
        # test repeat: run(3) yields one fps entry per repeat
        results = dataset_benchmark.run(3)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 3)
        # test test dataset
        dataset_benchmark = DatasetBenchmark(cfg, 'test', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        dataset_benchmark.run(1)
        # test val dataset
        dataset_benchmark = DatasetBenchmark(cfg, 'val', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        dataset_benchmark.run(1)
        # test dataset_type input parameters error
        with self.assertRaises(AssertionError):
            DatasetBenchmark(cfg, 'training', self.max_iter, self.log_interval,
                             self.num_warmup)
        # test full_init: a dataset providing full_init must also work
        cfg = copy.deepcopy(self.cfg)
        cfg.test_dataloader.dataset = dict(type='ToyFullInitDataset')
        dataset_benchmark = DatasetBenchmark(cfg, 'train', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        dataset_benchmark.run(1)
| 11,696 | 36.732258 | 79 |
py
|
ERD
|
ERD-main/tests/test_visualization/test_palette.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.datasets import CocoDataset
from mmdet.visualization import get_palette, jitter_color, palette_val
def test_palette():
    """Cover every accepted specifier of ``get_palette``/``palette_val``."""
    # palette_val normalizes 0-255 ints into 0-1 floats.
    assert palette_val([(1, 2, 3)])[0] == (1 / 255, 2 / 255, 3 / 255)

    # An explicit list is returned unchanged.
    requested = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
    for want, got in zip(requested, get_palette(requested, 3)):
        assert want == got

    # A single tuple is broadcast to every class.
    broadcast = get_palette((1, 2, 3), 3)
    assert len(broadcast) == 3
    assert all(color == (1, 2, 3) for color in broadcast)

    # A color name resolves to its RGB triple for every class.
    named = get_palette('red', 3)
    assert len(named) == 3
    assert all(color == (255, 0, 0) for color in named)

    # A dataset name resolves to that dataset's predefined palette.
    coco_palette = get_palette('coco', len(CocoDataset.METAINFO['classes']))
    assert len(coco_palette) == len(CocoDataset.METAINFO['classes'])
    assert coco_palette[0] == (220, 20, 60)

    # TODO: Awaiting refactoring
    # palette = get_palette('coco', len(CocoPanopticDataset.METAINFO['CLASSES'])) # noqa
    # assert len(palette) == len(CocoPanopticDataset.METAINFO['CLASSES'])
    # assert palette[-1] == (250, 141, 255)
    # palette = get_palette('voc', len(VOCDataset.METAINFO['CLASSES']))
    # assert len(palette) == len(VOCDataset.METAINFO['CLASSES'])
    # assert palette[0] == (106, 0, 228)
    # palette = get_palette('citys', len(CityscapesDataset.METAINFO['CLASSES'])) # noqa
    # assert len(palette) == len(CityscapesDataset.METAINFO['CLASSES'])
    # assert palette[0] == (220, 20, 60)

    # 'random' and None must agree with each other (seeded generation).
    palette1 = get_palette('random', 3)
    palette2 = get_palette(None, 3)
    for color1, color2 in zip(palette1, palette2):
        assert isinstance(color1, tuple)
        assert isinstance(color2, tuple)
        assert color1 == color2
def test_jitter_color():
    """Jittered colors must stay inside the valid 8-bit channel range."""
    base_color = tuple(np.random.randint(0, 255, 3, np.uint8))
    for channel in jitter_color(base_color):
        assert 0 <= channel <= 255
| 2,013 | 33.135593 | 88 |
py
|
ERD
|
ERD-main/tests/test_visualization/test_local_visualizer.py
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T
return bboxes
def _create_panoptic_data(num_boxes, h, w):
    """Build a random (1, h, w) panoptic segmentation map for tests.

    Each of ``num_boxes`` instances is painted inside its bounding box as
    ``(i + 1) * INSTANCE_OFFSET + label``; the rest of the map holds the
    stuff class id 2.
    """
    sem_seg = np.zeros((h, w), dtype=np.int64) + 2
    bboxes = _rand_bboxes(num_boxes, h, w).int()
    labels = torch.randint(2, (num_boxes, ))
    for i in range(num_boxes):
        # BUGFIX: _rand_bboxes returns (x1, y1, x2, y2) corners; the previous
        # code unpacked them as (x, y, w, h) — shadowing the h/w parameters —
        # and sliced ``[y:y+h, x:x+w]``, which treated the bottom-right
        # corner as a size and could paint far past the intended box.
        x1, y1, x2, y2 = bboxes[i]
        sem_seg[y1:y2, x1:x2] = (i + 1) * INSTANCE_OFFSET + labels[i]

    return sem_seg[None]
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
det_data_sample = DetDataSample()
det_data_sample.gt_instances = gt_instances
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample(
'image', image, det_data_sample, draw_pred=False)
# test out_file
det_local_visualizer.add_datasample(
'image',
image,
det_data_sample,
draw_pred=False,
out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
pred_instances.scores = torch.rand((num_bboxes, ))
det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image', image, det_data_sample, draw_gt=False, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# test gt_panoptic_seg and pred_panoptic_seg
det_local_visualizer.dataset_meta = dict(classes=('1', '2'))
gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=gt_sem_seg)
det_data_sample = DetDataSample()
det_data_sample.gt_panoptic_seg = panoptic_seg
pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=pred_sem_seg)
det_data_sample.pred_panoptic_seg = panoptic_seg
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
# class information must be provided
det_local_visualizer.dataset_meta = {}
with self.assertRaises(AssertionError):
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
def _assert_image_and_shape(self, out_file, out_shape):
    """Verify that ``out_file`` exists with pixel shape ``out_shape``.

    The file is deleted after the check so each test starts clean.
    """
    assert os.path.exists(out_file)
    saved_image = cv2.imread(out_file)
    assert saved_image.shape == out_shape
    os.remove(out_file)
| 4,149 | 33.297521 | 78 |
py
|
ERD
|
ERD-main/demo/video_demo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
    """Parse command-line options for the video demo.

    Returns:
        argparse.Namespace: video/config/checkpoint paths plus device,
        score threshold, output path, show flag and display interval.
    """
    ap = argparse.ArgumentParser(description='MMDetection video demo')
    ap.add_argument('video', help='Video file')
    ap.add_argument('config', help='Config file')
    ap.add_argument('checkpoint', help='Checkpoint file')
    ap.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    ap.add_argument(
        '--score-thr', type=float, default=0.3, help='Bbox score threshold')
    ap.add_argument('--out', type=str, help='Output video file')
    ap.add_argument('--show', action='store_true', help='Show video')
    ap.add_argument(
        '--wait-time',
        type=float,
        default=1,
        help='The interval of show (s), 0 is block')
    return ap.parse_args()
def main():
    """Run a detector over every frame of a video and save and/or show it.

    Requires at least one of ``--out`` (write annotated video) or
    ``--show`` (display each annotated frame).
    """
    args = parse_args()
    assert args.out or args.show, \
        ('Please specify at least one operation (save/show the '
         'video) with the argument "--out" or "--show"')

    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)

    # build test pipeline; frames arrive as ndarrays, not files on disk,
    # so the first pipeline step is switched to LoadImageFromNDArray
    model.cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
    test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)

    # init visualizer
    visualizer = VISUALIZERS.build(model.cfg.visualizer)
    # the dataset_meta is loaded from the checkpoint and
    # then pass to the model in init_detector
    visualizer.dataset_meta = model.dataset_meta

    video_reader = mmcv.VideoReader(args.video)
    video_writer = None
    if args.out:
        # mirror the input video's fps and frame size in the output file
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(
            args.out, fourcc, video_reader.fps,
            (video_reader.width, video_reader.height))

    for frame in track_iter_progress(video_reader):
        result = inference_detector(model, frame, test_pipeline=test_pipeline)
        visualizer.add_datasample(
            name='video',
            image=frame,
            data_sample=result,
            draw_gt=False,
            show=False,
            pred_score_thr=args.score_thr)
        # replace the raw frame with the annotated rendering
        frame = visualizer.get_image()

        if args.show:
            cv2.namedWindow('video', 0)
            mmcv.imshow(frame, 'video', args.wait_time)
        if args.out:
            video_writer.write(frame)

    if video_writer:
        video_writer.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| 2,771 | 32 | 79 |
py
|
ERD
|
ERD-main/demo/create_result_gif.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import mmcv
import numpy as np
from mmengine.utils import scandir
try:
import imageio
except ImportError:
imageio = None
# TODO verify after refactoring analyze_results.py
def parse_args():
    """Parse command-line options: result-image directory and gif path."""
    ap = argparse.ArgumentParser(description='Create GIF for demo')
    ap.add_argument(
        'image_dir',
        help='directory where result '
        'images save path generated by ‘analyze_results.py’')
    ap.add_argument(
        '--out',
        type=str,
        default='result.gif',
        help='gif path where will be saved')
    return ap.parse_args()
def _generate_batch_data(sampler, batch_size):
batch = []
for idx in sampler:
batch.append(idx)
if len(batch) == batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
def create_gif(frames, gif_name, duration=2):
    """Create gif through imageio.

    Args:
        frames (list[ndarray]): Image frames.
        gif_name (str): Saved gif name.
        duration (int): Display interval (s). Default: 2.

    Raises:
        RuntimeError: If the optional ``imageio`` dependency is missing
            (the module-level import sets ``imageio = None`` in that case).
    """
    if imageio is None:
        raise RuntimeError('imageio is not installed,'
                           'Please use “pip install imageio” to install')
    imageio.mimsave(gif_name, frames, 'GIF', duration=duration)
def create_frame_by_matplotlib(image_dir,
                               nrows=1,
                               fig_size=(300, 300),
                               font_size=15):
    """Create gif frame images through matplotlib.

    ``image_dir`` must contain exactly two sub-directories (the good/bad
    case renderings); images from both are paired up and drawn side by
    side, ``nrows`` pairs per frame.

    Args:
        image_dir (str): Root directory of result images.
        nrows (int): Number of rows displayed. Default: 1.
        fig_size (tuple): Figure size of the pyplot figure.
            Default: (300, 300).
        font_size (int): Font size of texts. Default: 15.

    Returns:
        list[ndarray]: RGB image frames rendered from the figures.
    """
    result_dir_names = os.listdir(image_dir)
    assert len(result_dir_names) == 2
    # Longer length has higher priority
    result_dir_names.reverse()

    images_list = []
    for dir_names in result_dir_names:
        images_list.append(scandir(osp.join(image_dir, dir_names)))

    frames = []
    for paths in _generate_batch_data(zip(*images_list), nrows):

        fig, axes = plt.subplots(nrows=nrows, ncols=2)
        fig.suptitle('Good/bad case selected according '
                     'to the COCO mAP of the single image')

        det_patch = mpatches.Patch(color='salmon', label='prediction')
        gt_patch = mpatches.Patch(color='royalblue', label='ground truth')
        # bbox_to_anchor may need to be finetuned
        plt.legend(
            handles=[det_patch, gt_patch],
            bbox_to_anchor=(1, -0.18),
            loc='lower right',
            borderaxespad=0.)

        if nrows == 1:
            axes = [axes]

        dpi = fig.get_dpi()
        # set fig size and margin
        fig.set_size_inches(
            (fig_size[0] * 2 + fig_size[0] // 20) / dpi,
            (fig_size[1] * nrows + fig_size[1] // 3) / dpi,
        )

        fig.tight_layout()
        # set subplot margin
        plt.subplots_adjust(
            hspace=.05,
            wspace=0.05,
            left=0.02,
            right=0.98,
            bottom=0.02,
            top=0.98)

        for i, (path_tuple, ax_tuple) in enumerate(zip(paths, axes)):
            image_path_left = osp.join(
                osp.join(image_dir, result_dir_names[0], path_tuple[0]))
            image_path_right = osp.join(
                osp.join(image_dir, result_dir_names[1], path_tuple[1]))
            image_left = mmcv.imread(image_path_left)
            image_left = mmcv.rgb2bgr(image_left)
            image_right = mmcv.imread(image_path_right)
            image_right = mmcv.rgb2bgr(image_right)

            if i == 0:
                # column titles only on the first row of each frame
                ax_tuple[0].set_title(
                    result_dir_names[0], fontdict={'size': font_size})
                ax_tuple[1].set_title(
                    result_dir_names[1], fontdict={'size': font_size})
            ax_tuple[0].imshow(
                image_left, extent=(0, *fig_size, 0), interpolation='bilinear')
            ax_tuple[0].axis('off')
            ax_tuple[1].imshow(
                image_right,
                extent=(0, *fig_size, 0),
                interpolation='bilinear')
            ax_tuple[1].axis('off')

        # rasterize the figure to an RGBA buffer and keep the RGB planes
        canvas = fig.canvas
        s, (width, height) = canvas.print_to_buffer()
        buffer = np.frombuffer(s, dtype='uint8')
        img_rgba = buffer.reshape(height, width, 4)
        rgb, alpha = np.split(img_rgba, [3], axis=2)
        img = rgb.astype('uint8')
        frames.append(img)
        # Fix: close the figure once its pixels are captured. Previously one
        # figure per batch stayed open, so memory grew with the number of
        # frames and matplotlib warns after 20 unclosed figures.
        plt.close(fig)

    return frames
def main():
    """Entry point: build frames from the result dir and write the GIF."""
    parsed = parse_args()
    gif_frames = create_frame_by_matplotlib(parsed.image_dir)
    create_gif(gif_frames, parsed.out)
if __name__ == '__main__':
main()
| 5,011 | 29.192771 | 79 |
py
|
ERD
|
ERD-main/demo/webcam_demo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
    """Parse command-line options for the webcam demo.

    Returns:
        argparse.Namespace: config/checkpoint paths plus device, camera id
        and bbox score threshold.
    """
    ap = argparse.ArgumentParser(description='MMDetection webcam demo')
    ap.add_argument('config', help='test config file path')
    ap.add_argument('checkpoint', help='checkpoint file')
    ap.add_argument(
        '--device', type=str, default='cuda:0', help='CPU/CUDA device option')
    ap.add_argument(
        '--camera-id', type=int, default=0, help='camera device id')
    ap.add_argument(
        '--score-thr', type=float, default=0.5, help='bbox score threshold')
    return ap.parse_args()
def main():
    """Continuously read webcam frames, run detection and display results.

    Loops until Esc/q/Q is pressed in the OpenCV window.
    """
    args = parse_args()

    # build the model from a config file and a checkpoint file
    device = torch.device(args.device)
    model = init_detector(args.config, args.checkpoint, device=device)

    # init visualizer
    visualizer = VISUALIZERS.build(model.cfg.visualizer)
    # the dataset_meta is loaded from the checkpoint and
    # then pass to the model in init_detector
    visualizer.dataset_meta = model.dataset_meta

    camera = cv2.VideoCapture(args.camera_id)

    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        ret_val, img = camera.read()
        result = inference_detector(model, img)

        # cv2 captures BGR; convert before drawing with the visualizer
        img = mmcv.imconvert(img, 'bgr', 'rgb')
        visualizer.add_datasample(
            name='result',
            image=img,
            data_sample=result,
            draw_gt=False,
            pred_score_thr=args.score_thr,
            show=False)

        img = visualizer.get_image()
        # swap channels back for cv2.imshow (the 'bgr'->'rgb' call is the
        # same channel reversal, so the displayed image ends up BGR)
        img = mmcv.imconvert(img, 'bgr', 'rgb')
        cv2.imshow('result', img)

        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break
if __name__ == '__main__':
main()
| 1,930 | 28.257576 | 78 |
py
|
ERD
|
ERD-main/demo/image_demo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Image Demo.
This script adopts a new infenence class, currently supports image path,
np.array and folder input formats, and will support video and webcam
in the future.
Example:
Save visualizations and predictions results::
python demo/image_demo.py demo/demo.jpg rtmdet-s
python demo/image_demo.py demo/demo.jpg \
configs/rtmdet/rtmdet_s_8xb32-300e_coco.py \
--weights rtmdet_s_8xb32-300e_coco_20220905_161602-387a891e.pth
Visualize prediction results::
python demo/image_demo.py demo/demo.jpg rtmdet-ins-s --show
python demo/image_demo.py demo/demo.jpg rtmdet-ins_s_8xb32-300e_coco \
--show
"""
from argparse import ArgumentParser
from mmengine.logging import print_log
from mmdet.apis import DetInferencer
def parse_args():
    """Parse CLI options and split them into inferencer-init and call kwargs.

    Returns:
        tuple[dict, dict]: ``(init_args, call_args)`` — ``init_args`` holds
        ``model``/``weights``/``device``/``palette`` for constructing the
        ``DetInferencer``; ``call_args`` holds everything else for the call.
    """
    ap = ArgumentParser()
    ap.add_argument(
        'inputs', type=str, help='Input image file or folder path.')
    ap.add_argument(
        'model',
        type=str,
        help='Config or checkpoint .pth file or the model name '
        'and alias defined in metafile. The model configuration '
        'file will try to read from .pth if the parameter is '
        'a .pth weights file.')
    ap.add_argument('--weights', default=None, help='Checkpoint file')
    ap.add_argument(
        '--out-dir',
        type=str,
        default='outputs',
        help='Output directory of images or prediction results.')
    ap.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    ap.add_argument(
        '--pred-score-thr',
        type=float,
        default=0.3,
        help='bbox score threshold')
    ap.add_argument(
        '--batch-size', type=int, default=1, help='Inference batch size.')
    ap.add_argument(
        '--show',
        action='store_true',
        help='Display the image in a popup window.')
    ap.add_argument(
        '--no-save-vis',
        action='store_true',
        help='Do not save detection vis results')
    ap.add_argument(
        '--no-save-pred',
        action='store_true',
        help='Do not save detection json results')
    ap.add_argument(
        '--print-result',
        action='store_true',
        help='Whether to print the results.')
    ap.add_argument(
        '--palette',
        default='none',
        choices=['coco', 'voc', 'citys', 'random', 'none'],
        help='Color palette used for visualization')
    call_args = vars(ap.parse_args())

    # nothing will be saved -> disable the output directory entirely
    if call_args['no_save_vis'] and call_args['no_save_pred']:
        call_args['out_dir'] = ''

    # a bare .pth positional is treated as weights, not a config/model name
    if call_args['model'].endswith('.pth'):
        print_log('The model is a weight file, automatically '
                  'assign the model to --weights')
        call_args['weights'] = call_args['model']
        call_args['model'] = None

    # split off the kwargs that belong to the inferencer constructor
    init_args = {
        key: call_args.pop(key)
        for key in ('model', 'weights', 'device', 'palette')
    }
    return init_args, call_args
def main():
    """Build a ``DetInferencer`` from the init kwargs and run it once."""
    init_args, call_args = parse_args()
    # TODO: Video and Webcam are currently not supported and
    # may consume too much memory if your input folder has a lot of images.
    # We will be optimized later.
    inferencer = DetInferencer(**init_args)
    inferencer(**call_args)

    # only report a save location when something was actually saved
    if call_args['out_dir'] != '' and not (call_args['no_save_vis']
                                           and call_args['no_save_pred']):
        print_log(f'results have been saved at {call_args["out_dir"]}')
if __name__ == '__main__':
main()
| 3,648 | 31.008772 | 78 |
py
|
ERD
|
ERD-main/demo/video_gpuaccel_demo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
    """Parse command-line options for the GPU-accelerated video demo."""
    ap = argparse.ArgumentParser(
        description='MMDetection video demo with GPU acceleration')
    ap.add_argument('video', help='Video file')
    ap.add_argument('config', help='Config file')
    ap.add_argument('checkpoint', help='Checkpoint file')
    ap.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    ap.add_argument(
        '--score-thr', type=float, default=0.3, help='Bbox score threshold')
    ap.add_argument('--out', type=str, help='Output video file')
    ap.add_argument('--show', action='store_true', help='Show video')
    ap.add_argument(
        '--nvdecode', action='store_true', help='Use NVIDIA decoder')
    ap.add_argument(
        '--wait-time',
        type=float,
        default=1,
        help='The interval of show (s), 0 is block')
    return ap.parse_args()
def prefetch_batch_input_shape(model: nn.Module,
                               ori_wh: Tuple[int, int]) -> Tuple[int, int]:
    """Run one dummy frame through the test pipeline to get the padded shape.

    NOTE: mutates ``model.cfg`` in place so the first pipeline step loads
    from an ndarray instead of a file.

    Args:
        model (nn.Module): Initialized detector whose ``cfg`` holds the
            test dataloader pipeline and data preprocessor.
        ori_wh (Tuple[int, int]): Original video ``(width, height)``.

    Returns:
        Tuple[int, int]: ``batch_input_shape`` as ``(height, width)`` after
        preprocessing/padding (matches the shape ``pack_data`` asserts on).
    """
    cfg = model.cfg
    w, h = ori_wh
    cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
    test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
    data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
    data = test_pipeline(data)
    _, data_sample = model.data_preprocessor([data], False)
    batch_input_shape = data_sample[0].batch_input_shape
    # Fix: the annotation previously said ``-> dict`` but the value returned
    # here is the batch_input_shape pair, consumed as such by the callers.
    return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
              ori_shape: Tuple[int, int]) -> dict:
    """Wrap one pre-resized frame into the dict format ``test_step`` expects.

    Args:
        frame_resize (np.ndarray): HWC frame already resized to
            ``batch_input_shape``.
        batch_input_shape (Tuple[int, int]): Padded model input (h, w).
        ori_shape (Tuple[int, int]): Original frame (h, w).

    Returns:
        dict: ``{'inputs': CHW tensor, 'data_sample': DetDataSample}`` with
        shape/scale metainfo attached.
    """
    assert frame_resize.shape[:2] == batch_input_shape
    scale_h = batch_input_shape[0] / ori_shape[0]
    scale_w = batch_input_shape[1] / ori_shape[1]

    data_sample = DetDataSample()
    data_sample.set_metainfo({
        'img_shape': batch_input_shape,
        'ori_shape': ori_shape,
        'scale_factor': (scale_h, scale_w),
    })
    # HWC -> CHW for the model input
    chw_frame = torch.from_numpy(frame_resize).permute((2, 0, 1))
    return {'inputs': chw_frame, 'data_sample': data_sample}
def main():
    """Run detection over a video using ffmpegcv decoding.

    The video is opened twice: once at the original size (for drawing) and
    once pre-resized to the model's padded input shape, so per-frame resize
    in Python is avoided.
    """
    args = parse_args()
    assert args.out or args.show, \
        ('Please specify at least one operation (save/show the '
         'video) with the argument "--out" or "--show"')

    model = init_detector(args.config, args.checkpoint, device=args.device)

    # init visualizer
    visualizer = VISUALIZERS.build(model.cfg.visualizer)
    # the dataset_meta is loaded from the checkpoint and
    # then pass to the model in init_detector
    visualizer.dataset_meta = model.dataset_meta

    # choose the NVIDIA hardware decoder when requested
    if args.nvdecode:
        VideoCapture = ffmpegcv.VideoCaptureNV
    else:
        VideoCapture = ffmpegcv.VideoCapture
    video_origin = VideoCapture(args.video)

    # probe the padded input shape once, then let ffmpegcv deliver frames
    # already resized to it
    batch_input_shape = prefetch_batch_input_shape(
        model, (video_origin.width, video_origin.height))
    ori_shape = (video_origin.height, video_origin.width)
    resize_wh = batch_input_shape[::-1]
    video_resize = VideoCapture(
        args.video,
        resize=resize_wh,
        resize_keepratio=True,
        resize_keepratioalign='topleft')

    video_writer = None
    if args.out:
        video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)

    with torch.no_grad():
        # iterate both captures in lockstep: resized frame feeds the model,
        # original frame is used for visualization
        for i, (frame_resize, frame_origin) in enumerate(
                zip(track_iter_progress(video_resize), video_origin)):
            data = pack_data(frame_resize, batch_input_shape, ori_shape)
            result = model.test_step([data])[0]

            visualizer.add_datasample(
                name='video',
                image=frame_origin,
                data_sample=result,
                draw_gt=False,
                show=False,
                pred_score_thr=args.score_thr)

            frame_mask = visualizer.get_image()

            if args.show:
                cv2.namedWindow('video', 0)
                mmcv.imshow(frame_mask, 'video', args.wait_time)
            if args.out:
                video_writer.write(frame_mask)

    if video_writer:
        video_writer.release()
    video_origin.release()
    video_resize.release()

    cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| 4,725 | 32.048951 | 77 |
py
|
ERD
|
ERD-main/configs/conditional_detr/conditional-detr_r50_8xb2-50e_coco.py
|
# Conditional DETR config: inherits DETR-R50 and overrides the decoder
# attention, classification loss and matcher; `_delete_=True` discards the
# corresponding inherited sub-dicts instead of merging into them.
_base_ = ['../detr/detr_r50_8xb2-150e_coco.py']
model = dict(
    type='ConditionalDETR',
    num_queries=300,
    decoder=dict(
        num_layers=6,
        layer_cfg=dict(
            self_attn_cfg=dict(
                _delete_=True,
                embed_dims=256,
                num_heads=8,
                attn_drop=0.1,
                cross_attn=False),
            cross_attn_cfg=dict(
                _delete_=True,
                embed_dims=256,
                num_heads=8,
                attn_drop=0.1,
                cross_attn=True))),
    bbox_head=dict(
        type='ConditionalDETRHead',
        loss_cls=dict(
            _delete_=True,
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='HungarianAssigner',
            match_costs=[
                dict(type='FocalLossCost', weight=2.0),
                dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
                dict(type='IoUCost', iou_mode='giou', weight=2.0)
            ])))

# learning policy: 50 epochs with a single LR drop at epoch 40
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=50, val_interval=1)
param_scheduler = [dict(type='MultiStepLR', end=50, milestones=[40])]
| 1,321 | 29.744186 | 75 |
py
|
ERD
|
ERD-main/configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py
|
# RetinaNet-R50 with GHM losses: GHMC replaces the classification loss and
# GHMR the box regression loss (`_delete_=True` drops the inherited ones).
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
    bbox_head=dict(
        loss_cls=dict(
            _delete_=True,
            type='GHMC',
            bins=30,
            momentum=0.75,
            use_sigmoid=True,
            loss_weight=1.0),
        loss_bbox=dict(
            _delete_=True,
            type='GHMR',
            mu=0.02,
            bins=10,
            momentum=0.7,
            loss_weight=10.0)))
# clip gradients during optimization
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
| 509 | 25.842105 | 62 |
py
|
ERD
|
ERD-main/configs/ghm/retinanet_x101-32x4d_fpn_ghm-1x_coco.py
|
# GHM RetinaNet with a ResNeXt-101-32x4d backbone swapped in for ResNet-50.
_base_ = './retinanet_r50_fpn_ghm-1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 423 | 27.266667 | 76 |
py
|
ERD
|
ERD-main/configs/ghm/retinanet_x101-64x4d_fpn_ghm-1x_coco.py
|
# GHM RetinaNet with a ResNeXt-101-64x4d backbone swapped in for ResNet-50.
_base_ = './retinanet_r50_fpn_ghm-1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 423 | 27.266667 | 76 |
py
|
ERD
|
ERD-main/configs/ghm/retinanet_r101_fpn_ghm-1x_coco.py
|
# GHM RetinaNet with a ResNet-101 backbone (depth override only).
_base_ = './retinanet_r50_fpn_ghm-1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 201 | 27.857143 | 61 |
py
|
ERD
|
ERD-main/configs/dcn/faster-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py
|
# Faster R-CNN with ResNeXt-101-32x4d backbone and deformable convolutions
# (DCN) enabled in res3-res5 (stage_with_dcn skips only the first stage).
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 557 | 31.823529 | 76 |
py
|
ERD
|
ERD-main/configs/dcn/mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py
|
# Mask R-CNN R50 with DCN in backbone stages res3-res5.
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
| 210 | 34.166667 | 72 |
py
|
ERD
|
ERD-main/configs/dcn/cascade-mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py
|
# Cascade Mask R-CNN R101 with DCN in backbone stages res3-res5.
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
| 222 | 36.166667 | 72 |
py
|
ERD
|
ERD-main/configs/dcn/cascade-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py
|
# Cascade R-CNN R101 with DCN in backbone stages res3-res5.
_base_ = '../cascade_rcnn/cascade-rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
| 217 | 35.333333 | 72 |
py
|
ERD
|
ERD-main/configs/dcn/faster-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py
|
# Faster R-CNN R101 with DCN in backbone stages res3-res5.
_base_ = '../faster_rcnn/faster-rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
| 215 | 35 | 72 |
py
|
ERD
|
ERD-main/configs/dcn/cascade-mask-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py
|
# Cascade Mask R-CNN X101-32x4d with DCN in backbone stages res3-res5.
_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
| 228 | 37.166667 | 72 |
py
|
ERD
|
ERD-main/configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py
|
# Faster R-CNN R50 with DCN in backbone stages res3-res5.
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
| 214 | 34.833333 | 72 |
py
|
ERD
|
ERD-main/configs/dcn/mask-rcnn_r50-dconv-c3-c5_fpn_amp-1x_coco.py
|
# Mask R-CNN R50 + DCN trained with automatic mixed precision (AMP).
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))

# MMEngine support the following two ways, users can choose
# according to convenience
# optim_wrapper = dict(type='AmpOptimWrapper')
_base_.optim_wrapper.type = 'AmpOptimWrapper'
| 391 | 34.636364 | 72 |
py
|
ERD
|
ERD-main/configs/dcn/mask-rcnn_r101-dconv-c3-c5_fpn_1x_coco.py
|
# Mask R-CNN R101 with DCN in backbone stages res3-res5.
_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
| 211 | 34.333333 | 72 |
py
|
ERD
|
ERD-main/configs/dcn/faster-rcnn_r50_fpn_dpool_1x_coco.py
|
# Faster R-CNN R50 using DeformRoIPoolPack instead of the inherited RoI
# layer (`_delete_=True` removes the base roi_layer before replacement).
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                _delete_=True,
                type='DeformRoIPoolPack',
                output_size=7,
                output_channels=256),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32])))
| 408 | 30.461538 | 56 |
py
|
ERD
|
ERD-main/configs/dcn/cascade-mask-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py
|
# Cascade Mask R-CNN R50 with DCN in backbone stages res3-res5.
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
| 221 | 36 | 72 |
py
|
ERD
|
ERD-main/configs/dcn/cascade-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py
|
# Cascade R-CNN R50 with DCN in backbone stages res3-res5.
_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
| 216 | 35.166667 | 72 |
py
|
ERD
|
ERD-main/configs/htc/htc_x101-64x4d-dconv-c3-c5_fpn_ms-400-1400-16xb1-20e_coco.py
|
# HTC X101-64x4d + DCN with multi-scale (400-1400) training resizes.
_base_ = './htc_x101-64x4d_fpn_16xb1-20e_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))

# dataset settings
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(
        type='RandomResize',
        scale=[(1600, 400), (1600, 1400)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
| 616 | 28.380952 | 79 |
py
|
ERD
|
ERD-main/configs/htc/htc_r50_fpn_20e_coco.py
|
# HTC R50: extend the 1x schedule to 20 epochs with LR drops at 16 and 19.
_base_ = './htc_r50_fpn_1x_coco.py'

# learning policy
max_epochs = 20
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[16, 19],
        gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
| 373 | 21 | 79 |
py
|
ERD
|
ERD-main/configs/htc/htc-without-semantic_r50_fpn_1x_coco.py
|
# Hybrid Task Cascade (HTC) R50-FPN without the semantic segmentation
# branch: a 3-stage cascade of bbox + mask heads with progressively
# stricter IoU thresholds (0.5 / 0.6 / 0.7) per stage.
_base_ = [
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# model settings
model = dict(
    type='HybridTaskCascade',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='HybridTaskCascadeRoIHead',
        interleaved=True,
        mask_info_flow=True,
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # one bbox head per cascade stage; only target_stds differ
        # (tighter regression targets in later stages)
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # one mask head per cascade stage; only the first disables the
        # residual conv connection (with_conv_res=False)
        mask_head=[
            dict(
                type='HTCMaskHead',
                with_conv_res=False,
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
            dict(
                type='HTCMaskHead',
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
            dict(
                type='HTCMaskHead',
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
        ]),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        # per-stage RCNN settings; IoU thresholds rise 0.5 -> 0.6 -> 0.7
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.001,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
| 7,857 | 34.080357 | 79 |
py
|
ERD
|
ERD-main/configs/htc/htc_x101-32x4d_fpn_16xb1-20e_coco.py
|
# HTC with a ResNeXt-101-32x4d backbone, batch size 1 per GPU and a
# 20-epoch schedule.
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))

train_dataloader = dict(batch_size=1, num_workers=1)

# learning policy
max_epochs = 20
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[16, 19],
        gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
| 828 | 24.121212 | 79 |
py
|
ERD
|
ERD-main/configs/htc/htc_r50_fpn_1x_coco.py
|
# Full HTC R50: adds the semantic segmentation branch (FusedSemanticHead)
# on top of the without-semantic base, and loads stuffthingmaps seg labels.
_base_ = './htc-without-semantic_r50_fpn_1x_coco.py'
model = dict(
    data_preprocessor=dict(pad_seg=True),
    roi_head=dict(
        semantic_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[8]),
        semantic_head=dict(
            type='FusedSemanticHead',
            num_ins=5,
            fusion_level=1,
            seg_scale_factor=1 / 8,
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=183,
            loss_seg=dict(
                type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2))))

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(
        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(
    dataset=dict(
        data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
        pipeline=train_pipeline))
| 1,195 | 34.176471 | 79 |
py
|
ERD
|
ERD-main/configs/htc/htc_x101-64x4d_fpn_16xb1-20e_coco.py
|
# HTC with ResNeXt-101-64x4d: only cardinality and pretrained weights
# differ from the 32x4d variant.
_base_ = './htc_x101-32x4d_fpn_16xb1-20e_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        groups=64,
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 226 | 27.375 | 76 |
py
|
ERD
|
ERD-main/configs/htc/htc_r101_fpn_20e_coco.py
|
# HTC with a ResNet-101 backbone on the 20-epoch schedule.
_base_ = './htc_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 192 | 26.571429 | 61 |
py
|
ERD
|
ERD-main/configs/dino/dino-4scale_r50_8xb2-12e_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
type='DINO',
num_queries=900, # num_matching_queries
with_box_refine=True,
as_two_stage=True,
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=1),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='ChannelMapper',
in_channels=[512, 1024, 2048],
kernel_size=1,
out_channels=256,
act_cfg=None,
norm_cfg=dict(type='GN', num_groups=32),
num_outs=4),
encoder=dict(
num_layers=6,
layer_cfg=dict(
self_attn_cfg=dict(embed_dims=256, num_levels=4,
dropout=0.0), # 0.1 for DeformDETR
ffn_cfg=dict(
embed_dims=256,
feedforward_channels=2048, # 1024 for DeformDETR
ffn_drop=0.0))), # 0.1 for DeformDETR
decoder=dict(
num_layers=6,
return_intermediate=True,
layer_cfg=dict(
self_attn_cfg=dict(embed_dims=256, num_heads=8,
dropout=0.0), # 0.1 for DeformDETR
cross_attn_cfg=dict(embed_dims=256, num_levels=4,
dropout=0.0), # 0.1 for DeformDETR
ffn_cfg=dict(
embed_dims=256,
feedforward_channels=2048, # 1024 for DeformDETR
ffn_drop=0.0)), # 0.1 for DeformDETR
post_norm_cfg=None),
positional_encoding=dict(
num_feats=128,
normalize=True,
offset=0.0, # -0.5 for DeformDETR
temperature=20), # 10000 for DeformDETR
bbox_head=dict(
type='DINOHead',
num_classes=80,
sync_cls_avg_factor=True,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0), # 2.0 in DeformDETR
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
dn_cfg=dict( # TODO: Move to model.train_cfg ?
label_noise_scale=0.5,
box_noise_scale=1.0, # 0.4 for DN-DETR
group_cfg=dict(dynamic=True, num_groups=None,
num_dn_queries=100)), # TODO: half num_dn_queries
# training and testing settings
train_cfg=dict(
assigner=dict(
type='HungarianAssigner',
match_costs=[
dict(type='FocalLossCost', weight=2.0),
dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
dict(type='IoUCost', iou_mode='giou', weight=2.0)
])),
test_cfg=dict(max_per_img=300)) # 100 for DeformDETR
# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different
# from the default setting in mmdet.
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
# The radio of all image in train dataset < 7
# follow the original implement
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(type='PackDetInputs')
]
train_dataloader = dict(
dataset=dict(
filter_cfg=dict(filter_empty_gt=False), pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(
type='AdamW',
lr=0.0001, # 0.0002 for DeformDETR
weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)})
) # custom_keys contains sampling_offsets and reference_points in DeformDETR # noqa
# learning policy
max_epochs = 12
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[11],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
| 5,783 | 34.268293 | 85 |
py
|
ERD
|
ERD-main/configs/dino/dino-5scale_swin-l_8xb2-36e_coco.py
|
_base_ = './dino-5scale_swin-l_8xb2-12e_coco.py'
max_epochs = 36
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
| 326 | 22.357143 | 70 |
py
|
ERD
|
ERD-main/configs/dino/dino-4scale_r50_8xb2-36e_coco.py
|
_base_ = './dino-4scale_r50_8xb2-12e_coco.py'
max_epochs = 36
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[30],
gamma=0.1)
]
| 319 | 21.857143 | 70 |
py
|
ERD
|
ERD-main/configs/dino/dino-5scale_swin-l_8xb2-12e_coco.py
|
_base_ = './dino-4scale_r50_8xb2-12e_coco.py'
fp16 = dict(loss_scale=512.)
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
num_levels = 5
model = dict(
num_feature_levels=num_levels,
backbone=dict(
_delete_=True,
type='SwinTransformer',
pretrain_img_size=384,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=12,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
# Please only add indices that would be used
# in FPN, otherwise some parameter will not be used
with_cp=True,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[192, 384, 768, 1536], num_outs=num_levels),
encoder=dict(layer_cfg=dict(self_attn_cfg=dict(num_levels=num_levels))),
decoder=dict(layer_cfg=dict(cross_attn_cfg=dict(num_levels=num_levels))))
| 1,148 | 34.90625 | 129 |
py
|
ERD
|
ERD-main/configs/dino/dino-4scale_r50_8xb2-24e_coco.py
|
_base_ = './dino-4scale_r50_8xb2-12e_coco.py'
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20],
gamma=0.1)
]
| 319 | 21.857143 | 70 |
py
|
ERD
|
ERD-main/configs/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_amp-lsj-100e_coco.py
|
_base_ = 'mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py' # noqa
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
| 202 | 39.6 | 89 |
py
|
ERD
|
ERD-main/configs/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
| 1,123 | 35.258065 | 77 |
py
|
ERD
|
ERD-main/configs/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
| 2,276 | 32 | 76 |
py
|
ERD
|
ERD-main/configs/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-400e_coco.py
|
_base_ = './mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py' # noqa
# Use RepeatDataset to speed up training
# change repeat time from 4 (for 100 epochs) to 16 (for 400 epochs)
train_dataloader = dict(dataset=dict(times=4 * 4))
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.067,
by_epoch=False,
begin=0,
end=500 * 4),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
| 543 | 24.904762 | 91 |
py
|
ERD
|
ERD-main/configs/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_amp-lsj-100e_coco.py
|
_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py'
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
| 188 | 36.8 | 75 |
py
|
ERD
|
ERD-main/configs/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-50e_coco.py
|
_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py'
# Use RepeatDataset to speed up training
# change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs)
train_dataloader = dict(dataset=dict(times=2))
| 231 | 37.666667 | 75 |
py
|
ERD
|
ERD-main/configs/reppoints/reppoints-moment_x101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 560 | 32 | 76 |
py
|
ERD
|
ERD-main/configs/reppoints/reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py
|
_base_ = './reppoints-moment_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg))
| 189 | 46.5 | 77 |
py
|
ERD
|
ERD-main/configs/reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py'
model = dict(bbox_head=dict(transform_method='minmax'))
| 116 | 38 | 59 |
py
|
ERD
|
ERD-main/configs/reppoints/reppoints-bbox_r50_fpn-gn_head-gn-grid_1x_coco.py
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py'
model = dict(
bbox_head=dict(transform_method='minmax', use_grid_points=True),
# training and testing settings
train_cfg=dict(
init=dict(
assigner=dict(
_delete_=True,
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1))))
| 450 | 31.214286 | 68 |
py
|
ERD
|
ERD-main/configs/reppoints/reppoints-moment_r101_fpn-gn_head-gn_2x_coco.py
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 215 | 29.857143 | 61 |
py
|
ERD
|
ERD-main/configs/reppoints/reppoints-bbox_r50-center_fpn-gn_head-gn-grid_1x_coco.py
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py'
model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True))
| 138 | 45.333333 | 77 |
py
|
ERD
|
ERD-main/configs/reppoints/reppoints-partial-minmax_r50_fpn-gn_head-gn_1x_coco.py
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py'
model = dict(bbox_head=dict(transform_method='partial_minmax'))
| 124 | 40.666667 | 63 |
py
|
ERD
|
ERD-main/configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='RepPointsDetector',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5),
bbox_head=dict(
type='RepPointsHead',
num_classes=80,
in_channels=256,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
transform_method='moment'),
# training and testing settings
train_cfg=dict(
init=dict(
assigner=dict(type='PointAssigner', scale=4, pos_num=1),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
optim_wrapper = dict(optimizer=dict(lr=0.01))
| 2,282 | 29.44 | 79 |
py
|
ERD
|
ERD-main/configs/reppoints/reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py'
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
| 429 | 22.888889 | 79 |
py
|
ERD
|
ERD-main/configs/reppoints/reppoints-moment_r101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 338 | 36.666667 | 72 |
py
|
ERD
|
ERD-main/configs/gfl/gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco.py
|
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
type='GFL',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, False, True, True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 580 | 29.578947 | 76 |
py
|
ERD
|
ERD-main/configs/gfl/gfl_r101_fpn_ms-2x_coco.py
|
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 401 | 27.714286 | 61 |
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.