repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
---|---|---|---|---|---|---|
ERD
|
ERD-main/mmdet/models/backbones/resnext.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmdet.registry import MODELS
from ..layers import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottleneck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
groups=1,
base_width=4,
base_channels=64,
**kwargs):
"""Bottleneck block for ResNeXt.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes *
(base_width / base_channels)) * groups
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
self.norm_cfg, width, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
self.with_modulated_dcn = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
if self.with_plugins:
self._del_block_plugins(self.after_conv1_plugin_names +
self.after_conv2_plugin_names +
self.after_conv3_plugin_names)
self.after_conv1_plugin_names = self.make_block_plugins(
width, self.after_conv1_plugins)
self.after_conv2_plugin_names = self.make_block_plugins(
width, self.after_conv2_plugins)
self.after_conv3_plugin_names = self.make_block_plugins(
self.planes * self.expansion, self.after_conv3_plugins)
def _del_block_plugins(self, plugin_names):
"""delete plugins for block if exist.
Args:
plugin_names (list[str]): List of plugins name to delete.
"""
assert isinstance(plugin_names, list)
for plugin_name in plugin_names:
del self._modules[plugin_name]
@MODELS.register_module()
class ResNeXt(ResNet):
"""ResNeXt backbone.
Args:
        depth (int): Depth of ResNeXt, from {50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Resnet stages. Default: 4.
groups (int): Group of resnext.
base_width (int): Base width of resnext.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
norm_cfg (dict): dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): whether to use zero init for last norm layer
in resblocks to let them behave as identity.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self, groups=1, base_width=4, **kwargs):
self.groups = groups
self.base_width = base_width
super(ResNeXt, self).__init__(**kwargs)
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer``"""
return ResLayer(
groups=self.groups,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
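# Illustrative usage sketch (a minimal CPU example under the default ResNet
# keyword arguments inherited by ResNeXt; not a prescribed configuration).
if __name__ == '__main__':
    import torch

    # ResNeXt-50 32x4d: for planes=64 the grouped bottleneck width becomes
    # floor(64 * 4 / 64) * 32 = 128 channels, split across 32 groups.
    model = ResNeXt(depth=50, groups=32, base_width=4)
    model.eval()
    with torch.no_grad():
        feats = model(torch.rand(1, 3, 224, 224))
    for feat in feats:
        print(tuple(feat.shape))  # feature maps at strides 4, 8, 16, 32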
| 5,712 | 35.858065 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/cspnext.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from torch import Tensor
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from ..layers import CSPLayer
from .csp_darknet import SPPBottleneck
@MODELS.register_module()
class CSPNeXt(BaseModule):
"""CSPNeXt backbone used in RTMDet.
Args:
arch (str): Architecture of CSPNeXt, from {P5, P6}.
Defaults to P5.
expand_ratio (float): Ratio to adjust the number of channels of the
hidden layer. Defaults to 0.5.
deepen_factor (float): Depth multiplier, multiply number of
blocks in CSP layer by this amount. Defaults to 1.0.
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Defaults to 1.0.
out_indices (Sequence[int]): Output from which stages.
Defaults to (2, 3, 4).
frozen_stages (int): Stages to be frozen (stop grad and set eval
mode). -1 means not freezing any parameters. Defaults to -1.
use_depthwise (bool): Whether to use depthwise separable convolution.
Defaults to False.
arch_ovewrite (list): Overwrite default arch settings.
Defaults to None.
        spp_kernel_sizes (tuple[int]): Sequence of kernel sizes of SPP
            layers. Defaults to (5, 9, 13).
channel_attention (bool): Whether to add channel attention in each
stage. Defaults to True.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
config norm layer. Defaults to dict(type='BN', requires_grad=True).
act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
Defaults to dict(type='SiLU').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`]): Initialization config dict.
"""
# From left to right:
# in_channels, out_channels, num_blocks, add_identity, use_spp
arch_settings = {
'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
[256, 512, 6, True, False], [512, 1024, 3, False, True]],
'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False],
[256, 512, 6, True, False], [512, 768, 3, True, False],
[768, 1024, 3, False, True]]
}
def __init__(
self,
arch: str = 'P5',
deepen_factor: float = 1.0,
widen_factor: float = 1.0,
out_indices: Sequence[int] = (2, 3, 4),
frozen_stages: int = -1,
use_depthwise: bool = False,
expand_ratio: float = 0.5,
arch_ovewrite: dict = None,
spp_kernel_sizes: Sequence[int] = (5, 9, 13),
channel_attention: bool = True,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='SiLU'),
norm_eval: bool = False,
init_cfg: OptMultiConfig = dict(
type='Kaiming',
layer='Conv2d',
a=math.sqrt(5),
distribution='uniform',
mode='fan_in',
nonlinearity='leaky_relu')
) -> None:
super().__init__(init_cfg=init_cfg)
arch_setting = self.arch_settings[arch]
if arch_ovewrite:
arch_setting = arch_ovewrite
assert set(out_indices).issubset(
i for i in range(len(arch_setting) + 1))
if frozen_stages not in range(-1, len(arch_setting) + 1):
raise ValueError('frozen_stages must be in range(-1, '
'len(arch_setting) + 1). But received '
f'{frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.use_depthwise = use_depthwise
self.norm_eval = norm_eval
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
self.stem = nn.Sequential(
ConvModule(
3,
int(arch_setting[0][0] * widen_factor // 2),
3,
padding=1,
stride=2,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
int(arch_setting[0][0] * widen_factor // 2),
int(arch_setting[0][0] * widen_factor // 2),
3,
padding=1,
stride=1,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
int(arch_setting[0][0] * widen_factor // 2),
int(arch_setting[0][0] * widen_factor),
3,
padding=1,
stride=1,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.layers = ['stem']
for i, (in_channels, out_channels, num_blocks, add_identity,
use_spp) in enumerate(arch_setting):
in_channels = int(in_channels * widen_factor)
out_channels = int(out_channels * widen_factor)
num_blocks = max(round(num_blocks * deepen_factor), 1)
stage = []
conv_layer = conv(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
stage.append(conv_layer)
if use_spp:
spp = SPPBottleneck(
out_channels,
out_channels,
kernel_sizes=spp_kernel_sizes,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
stage.append(spp)
csp_layer = CSPLayer(
out_channels,
out_channels,
num_blocks=num_blocks,
add_identity=add_identity,
use_depthwise=use_depthwise,
use_cspnext_block=True,
expand_ratio=expand_ratio,
channel_attention=channel_attention,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
stage.append(csp_layer)
self.add_module(f'stage{i + 1}', nn.Sequential(*stage))
self.layers.append(f'stage{i + 1}')
def _freeze_stages(self) -> None:
if self.frozen_stages >= 0:
for i in range(self.frozen_stages + 1):
m = getattr(self, self.layers[i])
m.eval()
for param in m.parameters():
param.requires_grad = False
def train(self, mode=True) -> None:
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
    def forward(self, x: Tensor) -> Tuple[Tensor, ...]:
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
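# Illustrative usage sketch (a minimal CPU example with scaled-down
# deepen/widen factors; the multipliers are arbitrary and not tied to any
# particular RTMDet variant).
if __name__ == '__main__':
    import torch

    model = CSPNeXt(arch='P5', deepen_factor=0.33, widen_factor=0.5)
    model.eval()
    with torch.no_grad():
        outs = model(torch.rand(1, 3, 320, 320))
    # Default out_indices=(2, 3, 4) correspond to stride 8, 16 and 32 features.
    for out in outs:
        print(tuple(out.shape))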
| 7,784 | 38.719388 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/resnest.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from ..layers import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNetV1d
class RSoftmax(nn.Module):
"""Radix Softmax module in ``SplitAttentionConv2d``.
Args:
radix (int): Radix of input.
groups (int): Groups of input.
"""
def __init__(self, radix, groups):
super().__init__()
self.radix = radix
self.groups = groups
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
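# Note on the radix softmax above (illustrative, for radix=2 and groups=1):
# a (B, 2 * C) input is reshaped so that the softmax runs over the two radix
# splits of each channel, i.e. the attention weights of the two splits of a
# given channel always sum to one; for radix=1 a plain sigmoid gate is used.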
class SplitAttentionConv2d(BaseModule):
"""Split-Attention Conv2d in ResNeSt.
Args:
in_channels (int): Number of channels in the input feature map.
channels (int): Number of intermediate channels.
kernel_size (int | tuple[int]): Size of the convolution kernel.
stride (int | tuple[int]): Stride of the convolution.
        padding (int | tuple[int]): Zero-padding added to both sides of
            the input. Default: 0.
        dilation (int | tuple[int]): Spacing between kernel elements.
        groups (int): Number of blocked connections from input channels to
            output channels. Default: 1.
        radix (int): Radix of SplitAttentionConv2d. Default: 2.
reduction_factor (int): Reduction factor of inter_channels. Default: 4.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer. Default: None.
dcn (dict): Config dict for DCN. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
radix=2,
reduction_factor=4,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
init_cfg=None):
super(SplitAttentionConv2d, self).__init__(init_cfg)
inter_channels = max(in_channels * radix // reduction_factor, 32)
self.radix = radix
self.groups = groups
self.channels = channels
self.with_dcn = dcn is not None
self.dcn = dcn
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if self.with_dcn and not fallback_on_stride:
assert conv_cfg is None, 'conv_cfg must be None for DCN'
conv_cfg = dcn
self.conv = build_conv_layer(
conv_cfg,
in_channels,
channels * radix,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups * radix,
bias=False)
# To be consistent with original implementation, starting from 0
self.norm0_name, norm0 = build_norm_layer(
norm_cfg, channels * radix, postfix=0)
self.add_module(self.norm0_name, norm0)
self.relu = nn.ReLU(inplace=True)
self.fc1 = build_conv_layer(
None, channels, inter_channels, 1, groups=self.groups)
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, inter_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.fc2 = build_conv_layer(
None, inter_channels, channels * radix, 1, groups=self.groups)
self.rsoftmax = RSoftmax(radix, groups)
@property
def norm0(self):
"""nn.Module: the normalization layer named "norm0" """
return getattr(self, self.norm0_name)
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
def forward(self, x):
x = self.conv(x)
x = self.norm0(x)
x = self.relu(x)
        batch = x.size(0)
if self.radix > 1:
splits = x.view(batch, self.radix, -1, *x.shape[2:])
gap = splits.sum(dim=1)
else:
gap = x
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
gap = self.norm1(gap)
gap = self.relu(gap)
atten = self.fc2(gap)
atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
if self.radix > 1:
attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
out = torch.sum(attens * splits, dim=1)
else:
out = atten * x
return out.contiguous()
class Bottleneck(_Bottleneck):
"""Bottleneck block for ResNeSt.
Args:
        inplanes (int): Input planes of this block.
planes (int): Middle planes of this block.
groups (int): Groups of conv2.
base_width (int): Base of width in terms of base channels. Default: 4.
base_channels (int): Base of channels for calculating width.
Default: 64.
        radix (int): Radix of SplitAttentionConv2d. Default: 2.
reduction_factor (int): Reduction factor of inter_channels in
SplitAttentionConv2d. Default: 4.
avg_down_stride (bool): Whether to use average pool for stride in
Bottleneck. Default: True.
kwargs (dict): Key word arguments for base class.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
groups=1,
base_width=4,
base_channels=64,
radix=2,
reduction_factor=4,
avg_down_stride=True,
**kwargs):
"""Bottleneck block for ResNeSt."""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes *
(base_width / base_channels)) * groups
self.avg_down_stride = avg_down_stride and self.conv2_stride > 1
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
self.with_modulated_dcn = False
self.conv2 = SplitAttentionConv2d(
width,
width,
kernel_size=3,
stride=1 if self.avg_down_stride else self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
radix=radix,
reduction_factor=reduction_factor,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
dcn=self.dcn)
delattr(self, self.norm2_name)
if self.avg_down_stride:
self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
out = self.conv2(out)
if self.avg_down_stride:
out = self.avd_layer(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@MODELS.register_module()
class ResNeSt(ResNetV1d):
"""ResNeSt backbone.
Args:
groups (int): Number of groups of Bottleneck. Default: 1
base_width (int): Base width of Bottleneck. Default: 4
radix (int): Radix of SplitAttentionConv2d. Default: 2
reduction_factor (int): Reduction factor of inter_channels in
SplitAttentionConv2d. Default: 4.
avg_down_stride (bool): Whether to use average pool for stride in
Bottleneck. Default: True.
kwargs (dict): Keyword arguments for ResNet.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3)),
200: (Bottleneck, (3, 24, 36, 3))
}
def __init__(self,
groups=1,
base_width=4,
radix=2,
reduction_factor=4,
avg_down_stride=True,
**kwargs):
self.groups = groups
self.base_width = base_width
self.radix = radix
self.reduction_factor = reduction_factor
self.avg_down_stride = avg_down_stride
super(ResNeSt, self).__init__(**kwargs)
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer``."""
return ResLayer(
groups=self.groups,
base_width=self.base_width,
base_channels=self.base_channels,
radix=self.radix,
reduction_factor=self.reduction_factor,
avg_down_stride=self.avg_down_stride,
**kwargs)
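# Illustrative usage sketch (a minimal CPU example with default settings; the
# tensor sizes below are arbitrary).
if __name__ == '__main__':
    # SplitAttentionConv2d keeps the spatial size and returns `channels` maps.
    sa_conv = SplitAttentionConv2d(32, 32, kernel_size=3, padding=1, radix=2)
    print(tuple(sa_conv(torch.rand(2, 32, 16, 16)).shape))  # (2, 32, 16, 16)

    model = ResNeSt(depth=50, radix=2, reduction_factor=4)
    model.eval()
    with torch.no_grad():
        feats = model(torch.rand(1, 3, 224, 224))
    for feat in feats:
        print(tuple(feat.shape))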
| 10,582 | 31.764706 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/csp_darknet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.registry import MODELS
from ..layers import CSPLayer
class Focus(nn.Module):
"""Focus width and height information into channel space.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
kernel_size (int): The kernel size of the convolution. Default: 1
stride (int): The stride of the convolution. Default: 1
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN', momentum=0.03, eps=0.001).
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish').
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish')):
super().__init__()
self.conv = ConvModule(
in_channels * 4,
out_channels,
kernel_size,
stride,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def forward(self, x):
# shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)
patch_top_left = x[..., ::2, ::2]
patch_top_right = x[..., ::2, 1::2]
patch_bot_left = x[..., 1::2, ::2]
patch_bot_right = x[..., 1::2, 1::2]
x = torch.cat(
(
patch_top_left,
patch_bot_left,
patch_top_right,
patch_bot_right,
),
dim=1,
)
return self.conv(x)
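# Shape walk-through of Focus (illustrative, assuming the default arguments):
# a (1, 3, 416, 416) input is sliced into four (1, 3, 208, 208) patches
# (even/odd rows x even/odd columns), concatenated to (1, 12, 208, 208) and
# projected by the ConvModule to (1, out_channels, 208, 208).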
class SPPBottleneck(BaseModule):
"""Spatial pyramid pooling layer used in YOLOv3-SPP.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
kernel_sizes (tuple[int]): Sequential of kernel sizes of pooling
layers. Default: (5, 9, 13).
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish').
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
kernel_sizes=(5, 9, 13),
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish'),
init_cfg=None):
super().__init__(init_cfg)
mid_channels = in_channels // 2
self.conv1 = ConvModule(
in_channels,
mid_channels,
1,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.poolings = nn.ModuleList([
nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
for ks in kernel_sizes
])
conv2_channels = mid_channels * (len(kernel_sizes) + 1)
self.conv2 = ConvModule(
conv2_channels,
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def forward(self, x):
x = self.conv1(x)
with torch.cuda.amp.autocast(enabled=False):
x = torch.cat(
[x] + [pooling(x) for pooling in self.poolings], dim=1)
x = self.conv2(x)
return x
@MODELS.register_module()
class CSPDarknet(BaseModule):
"""CSP-Darknet backbone used in YOLOv5 and YOLOX.
Args:
arch (str): Architecture of CSP-Darknet, from {P5, P6}.
Default: P5.
deepen_factor (float): Depth multiplier, multiply number of
blocks in CSP layer by this amount. Default: 1.0.
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (Sequence[int]): Output from which stages.
Default: (2, 3, 4).
frozen_stages (int): Stages to be frozen (stop grad and set eval
mode). -1 means not freezing any parameters. Default: -1.
use_depthwise (bool): Whether to use depthwise separable convolution.
Default: False.
        arch_ovewrite (list): Overwrite default arch settings. Default: None.
        spp_kernal_sizes (tuple[int]): Sequence of kernel sizes of SPP
            layers. Default: (5, 9, 13).
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True).
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
Example:
>>> from mmdet.models import CSPDarknet
>>> import torch
        >>> self = CSPDarknet(arch='P5')
>>> self.eval()
>>> inputs = torch.rand(1, 3, 416, 416)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
...
(1, 256, 52, 52)
(1, 512, 26, 26)
(1, 1024, 13, 13)
"""
# From left to right:
# in_channels, out_channels, num_blocks, add_identity, use_spp
arch_settings = {
'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False],
[256, 512, 9, True, False], [512, 1024, 3, False, True]],
'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False],
[256, 512, 9, True, False], [512, 768, 3, True, False],
[768, 1024, 3, False, True]]
}
def __init__(self,
arch='P5',
deepen_factor=1.0,
widen_factor=1.0,
out_indices=(2, 3, 4),
frozen_stages=-1,
use_depthwise=False,
arch_ovewrite=None,
spp_kernal_sizes=(5, 9, 13),
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish'),
norm_eval=False,
init_cfg=dict(
type='Kaiming',
layer='Conv2d',
a=math.sqrt(5),
distribution='uniform',
mode='fan_in',
nonlinearity='leaky_relu')):
super().__init__(init_cfg)
arch_setting = self.arch_settings[arch]
if arch_ovewrite:
arch_setting = arch_ovewrite
assert set(out_indices).issubset(
i for i in range(len(arch_setting) + 1))
if frozen_stages not in range(-1, len(arch_setting) + 1):
raise ValueError('frozen_stages must be in range(-1, '
'len(arch_setting) + 1). But received '
f'{frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.use_depthwise = use_depthwise
self.norm_eval = norm_eval
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
self.stem = Focus(
3,
int(arch_setting[0][0] * widen_factor),
kernel_size=3,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.layers = ['stem']
for i, (in_channels, out_channels, num_blocks, add_identity,
use_spp) in enumerate(arch_setting):
in_channels = int(in_channels * widen_factor)
out_channels = int(out_channels * widen_factor)
num_blocks = max(round(num_blocks * deepen_factor), 1)
stage = []
conv_layer = conv(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
stage.append(conv_layer)
if use_spp:
spp = SPPBottleneck(
out_channels,
out_channels,
kernel_sizes=spp_kernal_sizes,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
stage.append(spp)
csp_layer = CSPLayer(
out_channels,
out_channels,
num_blocks=num_blocks,
add_identity=add_identity,
use_depthwise=use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
stage.append(csp_layer)
self.add_module(f'stage{i + 1}', nn.Sequential(*stage))
self.layers.append(f'stage{i + 1}')
def _freeze_stages(self):
if self.frozen_stages >= 0:
for i in range(self.frozen_stages + 1):
m = getattr(self, self.layers[i])
m.eval()
for param in m.parameters():
param.requires_grad = False
def train(self, mode=True):
super(CSPDarknet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
def forward(self, x):
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
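# Illustrative sanity check of the building blocks above (a minimal sketch
# with arbitrary tensor sizes, assuming a CPU-only environment).
if __name__ == '__main__':
    focus = Focus(3, 32, kernel_size=3)
    x = torch.rand(1, 3, 64, 64)
    y = focus(x)
    print(tuple(y.shape))  # (1, 32, 32, 32): H/W halved, channels set by the conv

    spp = SPPBottleneck(32, 64, kernel_sizes=(5, 9, 13))
    print(tuple(spp(y).shape))  # (1, 64, 32, 32): pooled maps concatenated, then 1x1 conv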
| 10,620 | 36.006969 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .cspnext import CSPNeXt
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .efficientnet import EfficientNet
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .swin import SwinTransformer
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
'SwinTransformer', 'PyramidVisionTransformer',
'PyramidVisionTransformerV2', 'EfficientNet', 'CSPNeXt'
]
| 1,039 | 36.142857 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/hourglass.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from ..layers import ResLayer
from .resnet import BasicBlock
class HourglassModule(BaseModule):
"""Hourglass Module for HourglassNet backbone.
Generate module recursively and use BasicBlock as the base unit.
Args:
depth (int): Depth of current HourglassModule.
stage_channels (list[int]): Feature channels of sub-modules in current
and follow-up HourglassModule.
stage_blocks (list[int]): Number of sub-modules stacked in current and
follow-up HourglassModule.
norm_cfg (ConfigType): Dictionary to construct and config norm layer.
Defaults to `dict(type='BN', requires_grad=True)`
upsample_cfg (ConfigType): Config dict for interpolate layer.
Defaults to `dict(mode='nearest')`
init_cfg (dict or ConfigDict, optional): the config to control the
initialization.
"""
def __init__(self,
depth: int,
stage_channels: List[int],
stage_blocks: List[int],
norm_cfg: ConfigType = dict(type='BN', requires_grad=True),
upsample_cfg: ConfigType = dict(mode='nearest'),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg)
self.depth = depth
cur_block = stage_blocks[0]
next_block = stage_blocks[1]
cur_channel = stage_channels[0]
next_channel = stage_channels[1]
self.up1 = ResLayer(
BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)
self.low1 = ResLayer(
BasicBlock,
cur_channel,
next_channel,
cur_block,
stride=2,
norm_cfg=norm_cfg)
if self.depth > 1:
self.low2 = HourglassModule(depth - 1, stage_channels[1:],
stage_blocks[1:])
else:
self.low2 = ResLayer(
BasicBlock,
next_channel,
next_channel,
next_block,
norm_cfg=norm_cfg)
self.low3 = ResLayer(
BasicBlock,
next_channel,
cur_channel,
cur_block,
norm_cfg=norm_cfg,
downsample_first=False)
self.up2 = F.interpolate
self.upsample_cfg = upsample_cfg
    def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward function."""
up1 = self.up1(x)
low1 = self.low1(x)
low2 = self.low2(low1)
low3 = self.low3(low2)
        # Fixing `scale_factor` (e.g. 2) is common for upsampling, but
        # in some cases the spatial sizes are mismatched and an error arises.
if 'scale_factor' in self.upsample_cfg:
up2 = self.up2(low3, **self.upsample_cfg)
else:
shape = up1.shape[2:]
up2 = self.up2(low3, size=shape, **self.upsample_cfg)
return up1 + up2
@MODELS.register_module()
class HourglassNet(BaseModule):
"""HourglassNet backbone.
Stacked Hourglass Networks for Human Pose Estimation.
More details can be found in the `paper
<https://arxiv.org/abs/1603.06937>`_ .
Args:
downsample_times (int): Downsample times in a HourglassModule.
num_stacks (int): Number of HourglassModule modules stacked,
1 for Hourglass-52, 2 for Hourglass-104.
stage_channels (Sequence[int]): Feature channel of each sub-module in a
HourglassModule.
stage_blocks (Sequence[int]): Number of sub-modules stacked in a
HourglassModule.
feat_channel (int): Feature channel of conv after a HourglassModule.
        norm_cfg (ConfigType): Dictionary to construct and config norm layer.
init_cfg (dict or ConfigDict, optional): the config to control the
initialization.
Example:
>>> from mmdet.models import HourglassNet
>>> import torch
>>> self = HourglassNet()
>>> self.eval()
>>> inputs = torch.rand(1, 3, 511, 511)
>>> level_outputs = self.forward(inputs)
>>> for level_output in level_outputs:
... print(tuple(level_output.shape))
(1, 256, 128, 128)
(1, 256, 128, 128)
"""
def __init__(self,
downsample_times: int = 5,
num_stacks: int = 2,
stage_channels: Sequence = (256, 256, 384, 384, 384, 512),
stage_blocks: Sequence = (2, 2, 2, 2, 2, 4),
feat_channel: int = 256,
norm_cfg: ConfigType = dict(type='BN', requires_grad=True),
init_cfg: OptMultiConfig = None) -> None:
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super().__init__(init_cfg)
self.num_stacks = num_stacks
assert self.num_stacks >= 1
assert len(stage_channels) == len(stage_blocks)
assert len(stage_channels) > downsample_times
cur_channel = stage_channels[0]
self.stem = nn.Sequential(
ConvModule(
3, cur_channel // 2, 7, padding=3, stride=2,
norm_cfg=norm_cfg),
ResLayer(
BasicBlock,
cur_channel // 2,
cur_channel,
1,
stride=2,
norm_cfg=norm_cfg))
self.hourglass_modules = nn.ModuleList([
HourglassModule(downsample_times, stage_channels, stage_blocks)
for _ in range(num_stacks)
])
self.inters = ResLayer(
BasicBlock,
cur_channel,
cur_channel,
num_stacks - 1,
norm_cfg=norm_cfg)
self.conv1x1s = nn.ModuleList([
ConvModule(
cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
for _ in range(num_stacks - 1)
])
self.out_convs = nn.ModuleList([
ConvModule(
cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)
for _ in range(num_stacks)
])
self.remap_convs = nn.ModuleList([
ConvModule(
feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
for _ in range(num_stacks - 1)
])
self.relu = nn.ReLU(inplace=True)
def init_weights(self) -> None:
"""Init module weights."""
# Training Centripetal Model needs to reset parameters for Conv2d
super().init_weights()
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.reset_parameters()
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
"""Forward function."""
inter_feat = self.stem(x)
out_feats = []
for ind in range(self.num_stacks):
single_hourglass = self.hourglass_modules[ind]
out_conv = self.out_convs[ind]
hourglass_feat = single_hourglass(inter_feat)
out_feat = out_conv(hourglass_feat)
out_feats.append(out_feat)
if ind < self.num_stacks - 1:
inter_feat = self.conv1x1s[ind](
inter_feat) + self.remap_convs[ind](
out_feat)
inter_feat = self.inters[ind](self.relu(inter_feat))
return out_feats
| 7,744 | 33.269912 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/res2net.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import Sequential
from mmdet.registry import MODELS
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottle2neck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
scales=4,
base_width=26,
base_channels=64,
stage_type='normal',
**kwargs):
"""Bottle2neck block for Res2Net.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
width = int(math.floor(self.planes * (base_width / base_channels)))
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width * scales, postfix=1)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width * scales,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
if stage_type == 'stage' and self.conv2_stride != 1:
self.pool = nn.AvgPool2d(
kernel_size=3, stride=self.conv2_stride, padding=1)
convs = []
bns = []
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
for i in range(scales - 1):
convs.append(
build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False))
bns.append(
build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.conv3 = build_conv_layer(
self.conv_cfg,
width * scales,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.stage_type = stage_type
self.scales = scales
self.width = width
delattr(self, 'conv2')
delattr(self, self.norm2_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
spx = torch.split(out, self.width, 1)
sp = self.convs[0](spx[0].contiguous())
sp = self.relu(self.bns[0](sp))
out = sp
for i in range(1, self.scales - 1):
if self.stage_type == 'stage':
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp.contiguous())
sp = self.relu(self.bns[i](sp))
out = torch.cat((out, sp), 1)
if self.stage_type == 'normal' or self.conv2_stride == 1:
out = torch.cat((out, spx[self.scales - 1]), 1)
elif self.stage_type == 'stage':
out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
class Res2Layer(Sequential):
"""Res2Layer to build Res2Net style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
scales=4,
base_width=26,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False),
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=1,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1],
)
layers = []
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
stage_type='stage',
**kwargs))
inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
scales=scales,
base_width=base_width,
**kwargs))
super(Res2Layer, self).__init__(*layers)
@MODELS.register_module()
class Res2Net(ResNet):
"""Res2Net backbone.
Args:
scales (int): Scales used in Res2Net. Default: 4
base_width (int): Basic width of each scale. Default: 26
depth (int): Depth of res2net, from {50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Res2net stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottle2neck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert
plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import Res2Net
>>> import torch
>>> self = Res2Net(depth=50, scales=4, base_width=26)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 256, 8, 8)
(1, 512, 4, 4)
(1, 1024, 2, 2)
(1, 2048, 1, 1)
"""
arch_settings = {
50: (Bottle2neck, (3, 4, 6, 3)),
101: (Bottle2neck, (3, 4, 23, 3)),
152: (Bottle2neck, (3, 8, 36, 3))
}
def __init__(self,
scales=4,
base_width=26,
style='pytorch',
deep_stem=True,
avg_down=True,
pretrained=None,
init_cfg=None,
**kwargs):
self.scales = scales
self.base_width = base_width
super(Res2Net, self).__init__(
style='pytorch',
deep_stem=True,
avg_down=True,
pretrained=pretrained,
init_cfg=init_cfg,
**kwargs)
def make_res_layer(self, **kwargs):
return Res2Layer(
scales=self.scales,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
| 11,661 | 34.554878 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/darknet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import warnings
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.registry import MODELS
class ResBlock(BaseModule):
"""The basic residual block used in Darknet. Each ResBlock consists of two
ConvModules and the input is added to the final output. Each ConvModule is
composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer
has half of the number of the filters as much as the second convLayer. The
first convLayer has filter size of 1x1 and the second one has the filter
size of 3x3.
Args:
in_channels (int): The input channels. Must be even.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=None):
super(ResBlock, self).__init__(init_cfg)
assert in_channels % 2 == 0 # ensure the in_channels is even
half_in_channels = in_channels // 2
# shortcut
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)
self.conv2 = ConvModule(
half_in_channels, in_channels, 3, padding=1, **cfg)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.conv2(out)
out = out + residual
return out
@MODELS.register_module()
class Darknet(BaseModule):
"""Darknet backbone.
Args:
depth (int): Depth of Darknet. Currently only support 53.
out_indices (Sequence[int]): Output from which stages.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import Darknet
>>> import torch
>>> self = Darknet(depth=53)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 416, 416)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
...
(1, 256, 52, 52)
(1, 512, 26, 26)
(1, 1024, 13, 13)
"""
# Dict(depth: (layers, channels))
arch_settings = {
53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
(512, 1024)))
}
def __init__(self,
depth=53,
out_indices=(3, 4, 5),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
norm_eval=True,
pretrained=None,
init_cfg=None):
super(Darknet, self).__init__(init_cfg)
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for darknet')
self.depth = depth
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.layers, self.channels = self.arch_settings[depth]
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)
self.cr_blocks = ['conv1']
for i, n_layers in enumerate(self.layers):
layer_name = f'conv_res_block{i + 1}'
in_c, out_c = self.channels[i]
self.add_module(
layer_name,
self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
self.cr_blocks.append(layer_name)
self.norm_eval = norm_eval
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
outs = []
for i, layer_name in enumerate(self.cr_blocks):
cr_block = getattr(self, layer_name)
x = cr_block(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for i in range(self.frozen_stages):
m = getattr(self, self.cr_blocks[i])
m.eval()
for param in m.parameters():
param.requires_grad = False
def train(self, mode=True):
super(Darknet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
@staticmethod
def make_conv_res_block(in_channels,
out_channels,
res_repeat,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU',
negative_slope=0.1)):
"""In Darknet backbone, ConvLayer is usually followed by ResBlock. This
function will make that. The Conv layers always have 3x3 filters with
stride=2. The number of the filters in Conv layer is the same as the
out channels of the ResBlock.
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
res_repeat (int): The number of ResBlocks.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
"""
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
model = nn.Sequential()
model.add_module(
'conv',
ConvModule(
in_channels, out_channels, 3, stride=2, padding=1, **cfg))
for idx in range(res_repeat):
model.add_module('res{}'.format(idx),
ResBlock(out_channels, **cfg))
return model
| 8,235 | 37.485981 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/deepfashion.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
"""Dataset for DeepFashion."""
METAINFO = {
'classes': ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants',
'bag', 'neckwear', 'headwear', 'eyeglass', 'belt',
'footwear', 'hair', 'skin', 'face'),
# palette is a list of color tuples, which is used for visualization.
'palette': [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64),
(0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96),
(128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192),
(128, 0, 96), (128, 0, 192), (0, 32, 192)]
}
| 819 | 40 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/voc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
"""Dataset for PASCAL VOC."""
METAINFO = {
'classes':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# palette is a list of color tuples, which is used for visualization.
'palette': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199),
(0, 226, 252), (182, 182, 255), (0, 0, 230), (220, 20, 60),
(163, 255, 0), (0, 82, 0), (3, 95, 161), (0, 80, 100),
(183, 130, 88)]
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
if 'VOC2007' in self.sub_data_root:
self._metainfo['dataset_type'] = 'VOC2007'
elif 'VOC2012' in self.sub_data_root:
self._metainfo['dataset_type'] = 'VOC2012'
else:
self._metainfo['dataset_type'] = None
| 1,305 | 39.8125 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/crowdhuman.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import json
import logging
import os.path as osp
import warnings
from typing import List, Union
import mmcv
from mmengine.dist import get_rank
from mmengine.fileio import dump, get, get_text, load
from mmengine.logging import print_log
from mmengine.utils import ProgressBar
from mmdet.registry import DATASETS
from .base_det_dataset import BaseDetDataset
@DATASETS.register_module()
class CrowdHumanDataset(BaseDetDataset):
r"""Dataset for CrowdHuman.
Args:
data_root (str): The root directory for
``data_prefix`` and ``ann_file``.
ann_file (str): Annotation file path.
        extra_ann_file (str, optional): The path of extra image metas
for CrowdHuman. It can be created by CrowdHumanDataset
automatically or by tools/misc/get_crowdhuman_id_hw.py
manually. Defaults to None.
"""
METAINFO = {
'classes': ('person', ),
# palette is a list of color tuples, which is used for visualization.
'palette': [(220, 20, 60)]
}
def __init__(self, data_root, ann_file, extra_ann_file=None, **kwargs):
# extra_ann_file record the size of each image. This file is
# automatically created when you first load the CrowdHuman
# dataset by mmdet.
if extra_ann_file is not None:
self.extra_ann_exist = True
self.extra_anns = load(extra_ann_file)
else:
ann_file_name = osp.basename(ann_file)
if 'train' in ann_file_name:
self.extra_ann_file = osp.join(data_root, 'id_hw_train.json')
elif 'val' in ann_file_name:
self.extra_ann_file = osp.join(data_root, 'id_hw_val.json')
self.extra_ann_exist = False
if not osp.isfile(self.extra_ann_file):
print_log(
'extra_ann_file does not exist, prepare to collect '
'image height and width...',
level=logging.INFO)
self.extra_anns = {}
else:
self.extra_ann_exist = True
self.extra_anns = load(self.extra_ann_file)
super().__init__(data_root=data_root, ann_file=ann_file, **kwargs)
def load_data_list(self) -> List[dict]:
"""Load annotations from an annotation file named as ``self.ann_file``
Returns:
List[dict]: A list of annotation.
""" # noqa: E501
anno_strs = get_text(
self.ann_file, backend_args=self.backend_args).strip().split('\n')
print_log('loading CrowdHuman annotation...', level=logging.INFO)
data_list = []
prog_bar = ProgressBar(len(anno_strs))
for i, anno_str in enumerate(anno_strs):
anno_dict = json.loads(anno_str)
parsed_data_info = self.parse_data_info(anno_dict)
data_list.append(parsed_data_info)
prog_bar.update()
if not self.extra_ann_exist and get_rank() == 0:
# TODO: support file client
try:
dump(self.extra_anns, self.extra_ann_file, file_format='json')
except: # noqa
warnings.warn(
                    'Cache files can not be saved automatically! To speed up '
                    'loading the dataset, please manually generate the cache '
                    'file by tools/misc/get_crowdhuman_id_hw.py')
print_log(
f'\nsave extra_ann_file in {self.data_root}',
level=logging.INFO)
del self.extra_anns
print_log('\nDone', level=logging.INFO)
return data_list
def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]:
"""Parse raw annotation to target format.
Args:
raw_data_info (dict): Raw data information load from ``ann_file``
Returns:
Union[dict, List[dict]]: Parsed annotation.
"""
data_info = {}
img_path = osp.join(self.data_prefix['img'],
f"{raw_data_info['ID']}.jpg")
data_info['img_path'] = img_path
data_info['img_id'] = raw_data_info['ID']
if not self.extra_ann_exist:
img_bytes = get(img_path, backend_args=self.backend_args)
img = mmcv.imfrombytes(img_bytes, backend='cv2')
data_info['height'], data_info['width'] = img.shape[:2]
self.extra_anns[raw_data_info['ID']] = img.shape[:2]
del img, img_bytes
else:
data_info['height'], data_info['width'] = self.extra_anns[
raw_data_info['ID']]
instances = []
for i, ann in enumerate(raw_data_info['gtboxes']):
instance = {}
if ann['tag'] not in self.metainfo['classes']:
instance['bbox_label'] = -1
instance['ignore_flag'] = 1
else:
instance['bbox_label'] = self.metainfo['classes'].index(
ann['tag'])
instance['ignore_flag'] = 0
if 'extra' in ann:
if 'ignore' in ann['extra']:
if ann['extra']['ignore'] != 0:
instance['bbox_label'] = -1
instance['ignore_flag'] = 1
x1, y1, w, h = ann['fbox']
bbox = [x1, y1, x1 + w, y1 + h]
instance['bbox'] = bbox
# Record the full bbox(fbox), head bbox(hbox) and visible
# bbox(vbox) as additional information. If you need to use
# this information, you just need to design the pipeline
# instead of overriding the CrowdHumanDataset.
instance['fbox'] = bbox
hbox = ann['hbox']
instance['hbox'] = [
hbox[0], hbox[1], hbox[0] + hbox[2], hbox[1] + hbox[3]
]
vbox = ann['vbox']
instance['vbox'] = [
vbox[0], vbox[1], vbox[0] + vbox[2], vbox[1] + vbox[3]
]
instances.append(instance)
data_info['instances'] = instances
return data_info
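# Illustration of the per-line JSON annotation that ``parse_data_info``
# consumes (field names follow the keys accessed above; the values are
# invented for the example).
if __name__ == '__main__':
    example_anno = {
        'ID': 'example_image_id',  # image file name without the .jpg suffix
        'gtboxes': [{
            'tag': 'person',  # labels outside ``classes`` are marked ignored
            'fbox': [72, 202, 163, 503],  # full-body box as (x1, y1, w, h)
            'hbox': [171, 208, 62, 83],  # head box, same format
            'vbox': [72, 202, 163, 398],  # visible-region box, same format
            'extra': {'ignore': 0},
        }],
    }
    print(json.dumps(example_anno, indent=2))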
| 6,153 | 37.4625 | 78 |
py
|
ERD
|
ERD-main/mmdet/datasets/base_det_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List, Optional
from mmengine.dataset import BaseDataset
from mmengine.fileio import load
from mmengine.utils import is_abs
from ..registry import DATASETS
@DATASETS.register_module()
class BaseDetDataset(BaseDataset):
"""Base dataset for detection.
Args:
proposal_file (str, optional): Proposals file path. Defaults to None.
file_client_args (dict): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
def __init__(self,
*args,
seg_map_suffix: str = '.png',
proposal_file: Optional[str] = None,
file_client_args: dict = None,
backend_args: dict = None,
**kwargs) -> None:
self.seg_map_suffix = seg_map_suffix
self.proposal_file = proposal_file
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead, please refer to'
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
super().__init__(*args, **kwargs)
def full_init(self) -> None:
"""Load annotation file and set ``BaseDataset._fully_initialized`` to
True.
If ``lazy_init=False``, ``full_init`` will be called during the
instantiation and ``self._fully_initialized`` will be set to True. If
``obj._fully_initialized=False``, the class method decorated by
``force_full_init`` will call ``full_init`` automatically.
Several steps to initialize annotation:
- load_data_list: Load annotations from annotation file.
- load_proposals: Load proposals from proposal file, if
`self.proposal_file` is not None.
- filter data information: Filter annotations according to
filter_cfg.
- slice_data: Slice dataset according to ``self._indices``
- serialize_data: Serialize ``self.data_list`` if
``self.serialize_data`` is True.
"""
if self._fully_initialized:
return
# load data information
self.data_list = self.load_data_list()
# get proposals from file
if self.proposal_file is not None:
self.load_proposals()
# filter illegal data, such as data that has no annotations.
self.data_list = self.filter_data()
# Get subset data according to indices.
if self._indices is not None:
self.data_list = self._get_unserialized_subset(self._indices)
# serialize data_list
if self.serialize_data:
self.data_bytes, self.data_address = self._serialize_data()
self._fully_initialized = True
def load_proposals(self) -> None:
"""Load proposals from proposals file.
        The `proposals_list` should be a dict[img_path: proposals]
        with the same length as `data_list`, and each `proposals` should be
        a `dict` or :obj:`InstanceData` that usually contains the
        following keys.
        - bboxes (np.ndarray): Has a shape (num_instances, 4),
          the last dimension 4 arranged as (x1, y1, x2, y2).
        - scores (np.ndarray): Classification scores, has a shape
          (num_instances, ).
"""
# TODO: Add Unit Test after fully support Dump-Proposal Metric
if not is_abs(self.proposal_file):
self.proposal_file = osp.join(self.data_root, self.proposal_file)
proposals_list = load(
self.proposal_file, backend_args=self.backend_args)
assert len(self.data_list) == len(proposals_list)
for data_info in self.data_list:
img_path = data_info['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
proposals = proposals_list[file_name]
data_info['proposals'] = proposals
def get_cat_ids(self, idx: int) -> List[int]:
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
List[int]: All categories in the image of specified index.
"""
instances = self.get_data_info(idx)['instances']
return [instance['bbox_label'] for instance in instances]
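# --- Illustrative sketch, not part of the original file ---
# A minimal example of the proposal-file layout that ``load_proposals`` above
# expects: a mapping from ``<last_dir>/<file_name>`` to a dict carrying
# ``bboxes`` and ``scores``. The image path in the doctest is hypothetical;
# in practice the mapping is dumped to disk and passed via ``proposal_file``.
def _demo_build_proposals(img_paths):
    """Build an in-memory ``proposals_list`` keyed like ``load_proposals``.
    Examples:
        >>> props = _demo_build_proposals(['data/coco/val2017/000000000139.jpg'])
        >>> sorted(props) == [osp.join('val2017', '000000000139.jpg')]
        True
    """
    import numpy as np
    proposals_list = {}
    for img_path in img_paths:
        # Reproduce the key construction used by ``load_proposals``.
        file_name = osp.join(
            osp.split(osp.split(img_path)[0])[-1],
            osp.split(img_path)[-1])
        proposals_list[file_name] = dict(
            bboxes=np.zeros((0, 4), dtype=np.float32),
            scores=np.zeros((0, ), dtype=np.float32))
    return proposals_list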
| 4,796 | 38.644628 | 125 |
py
|
ERD
|
ERD-main/mmdet/datasets/cityscapes.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa
# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
from typing import List
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
"""Dataset for Cityscapes."""
METAINFO = {
'classes': ('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'),
'palette': [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),
(0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]
}
def filter_data(self) -> List[dict]:
"""Filter annotations according to filter_cfg.
Returns:
List[dict]: Filtered results.
"""
if self.test_mode:
return self.data_list
if self.filter_cfg is None:
return self.data_list
filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)
min_size = self.filter_cfg.get('min_size', 0)
# obtain images that contain annotation
ids_with_ann = set(data_info['img_id'] for data_info in self.data_list)
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_data_infos = []
for i, data_info in enumerate(self.data_list):
img_id = data_info['img_id']
width = data_info['width']
height = data_info['height']
all_is_crowd = all([
instance['ignore_flag'] == 1
for instance in data_info['instances']
])
if filter_empty_gt and (img_id not in ids_in_cat or all_is_crowd):
continue
if min(width, height) >= min_size:
valid_data_infos.append(data_info)
return valid_data_infos
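# --- Illustrative sketch, not part of the original file ---
# A hypothetical config snippet showing the ``filter_cfg`` fields consumed by
# ``filter_data`` above; only ``filter_empty_gt`` and ``min_size`` are read.
# The paths are placeholders and the pipeline is left empty for brevity.
_demo_cityscapes_train = dict(
    type='CityscapesDataset',
    data_root='data/cityscapes/',
    ann_file='annotations/instancesonly_filtered_gtFine_train.json',
    data_prefix=dict(img='leftImg8bit/train/'),
    filter_cfg=dict(filter_empty_gt=True, min_size=32),
    pipeline=[])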
| 2,323 | 36.483871 | 135 |
py
|
ERD
|
ERD-main/mmdet/datasets/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.transforms import LoadAnnotations, LoadPanopticAnnotations
from mmdet.registry import TRANSFORMS
def get_loading_pipeline(pipeline):
"""Only keep loading image and annotations related configuration.
Args:
pipeline (list[dict]): Data pipeline configs.
Returns:
        list[dict]: The new pipeline list that only keeps the
            configuration related to loading images and annotations.
Examples:
>>> pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True),
... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
... dict(type='RandomFlip', flip_ratio=0.5),
... dict(type='Normalize', **img_norm_cfg),
... dict(type='Pad', size_divisor=32),
... dict(type='DefaultFormatBundle'),
... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
... ]
>>> expected_pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True)
... ]
>>> assert expected_pipelines ==\
... get_loading_pipeline(pipelines)
"""
loading_pipeline_cfg = []
for cfg in pipeline:
obj_cls = TRANSFORMS.get(cfg['type'])
        # TODO: use a more elegant way to distinguish loading modules
if obj_cls is not None and obj_cls in (LoadImageFromFile,
LoadAnnotations,
LoadPanopticAnnotations):
loading_pipeline_cfg.append(cfg)
assert len(loading_pipeline_cfg) == 2, \
'The data pipeline in your config file must include ' \
'loading image and annotations related pipeline.'
return loading_pipeline_cfg
| 1,925 | 38.306122 | 78 |
py
|
ERD
|
ERD-main/mmdet/datasets/openimages.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import csv
import os.path as osp
from collections import defaultdict
from typing import Dict, List, Optional
import numpy as np
from mmengine.fileio import get_local_path, load
from mmengine.utils import is_abs
from mmdet.registry import DATASETS
from .base_det_dataset import BaseDetDataset
@DATASETS.register_module()
class OpenImagesDataset(BaseDetDataset):
"""Open Images dataset for detection.
Args:
ann_file (str): Annotation file path.
label_file (str): File path of the label description file that
            maps the class names in MID format to their short
descriptions.
meta_file (str): File path to get image metas.
hierarchy_file (str): The file path of the class hierarchy.
image_level_ann_file (str): Human-verified image level annotation,
which is used in evaluation.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
METAINFO: dict = dict(dataset_type='oid_v6')
def __init__(self,
label_file: str,
meta_file: str,
hierarchy_file: str,
image_level_ann_file: Optional[str] = None,
**kwargs) -> None:
self.label_file = label_file
self.meta_file = meta_file
self.hierarchy_file = hierarchy_file
self.image_level_ann_file = image_level_ann_file
super().__init__(**kwargs)
def load_data_list(self) -> List[dict]:
"""Load annotations from an annotation file named as ``self.ann_file``
Returns:
List[dict]: A list of annotation.
"""
classes_names, label_id_mapping = self._parse_label_file(
self.label_file)
self._metainfo['classes'] = classes_names
self.label_id_mapping = label_id_mapping
if self.image_level_ann_file is not None:
img_level_anns = self._parse_img_level_ann(
self.image_level_ann_file)
else:
img_level_anns = None
# OpenImagesMetric can get the relation matrix from the dataset meta
relation_matrix = self._get_relation_matrix(self.hierarchy_file)
self._metainfo['RELATION_MATRIX'] = relation_matrix
data_list = []
with get_local_path(
self.ann_file, backend_args=self.backend_args) as local_path:
with open(local_path, 'r') as f:
reader = csv.reader(f)
last_img_id = None
instances = []
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
if last_img_id is None:
last_img_id = img_id
label_id = line[2]
assert label_id in self.label_id_mapping
label = int(self.label_id_mapping[label_id])
bbox = [
float(line[4]), # xmin
float(line[6]), # ymin
float(line[5]), # xmax
float(line[7]) # ymax
]
                    is_occluded = int(line[8]) == 1
                    is_truncated = int(line[9]) == 1
                    is_group_of = int(line[10]) == 1
                    is_depiction = int(line[11]) == 1
                    is_inside = int(line[12]) == 1
instance = dict(
bbox=bbox,
bbox_label=label,
ignore_flag=0,
is_occluded=is_occluded,
is_truncated=is_truncated,
is_group_of=is_group_of,
is_depiction=is_depiction,
is_inside=is_inside)
last_img_path = osp.join(self.data_prefix['img'],
f'{last_img_id}.jpg')
if img_id != last_img_id:
# switch to a new image, record previous image's data.
data_info = dict(
img_path=last_img_path,
img_id=last_img_id,
instances=instances,
)
data_list.append(data_info)
instances = []
instances.append(instance)
last_img_id = img_id
data_list.append(
dict(
img_path=last_img_path,
img_id=last_img_id,
instances=instances,
))
# add image metas to data list
img_metas = load(
self.meta_file, file_format='pkl', backend_args=self.backend_args)
assert len(img_metas) == len(data_list)
for i, meta in enumerate(img_metas):
img_id = data_list[i]['img_id']
assert f'{img_id}.jpg' == osp.split(meta['filename'])[-1]
h, w = meta['ori_shape'][:2]
data_list[i]['height'] = h
data_list[i]['width'] = w
# denormalize bboxes
for j in range(len(data_list[i]['instances'])):
data_list[i]['instances'][j]['bbox'][0] *= w
data_list[i]['instances'][j]['bbox'][2] *= w
data_list[i]['instances'][j]['bbox'][1] *= h
data_list[i]['instances'][j]['bbox'][3] *= h
# add image-level annotation
if img_level_anns is not None:
img_labels = []
confidences = []
img_ann_list = img_level_anns.get(img_id, [])
for ann in img_ann_list:
img_labels.append(int(ann['image_level_label']))
confidences.append(float(ann['confidence']))
data_list[i]['image_level_labels'] = np.array(
img_labels, dtype=np.int64)
data_list[i]['confidences'] = np.array(
confidences, dtype=np.float32)
return data_list
def _parse_label_file(self, label_file: str) -> tuple:
"""Get classes name and index mapping from cls-label-description file.
Args:
label_file (str): File path of the label description file that
                maps the class names in MID format to their short
                descriptions.
        Returns:
            tuple: A tuple of class names and a mapping from label MID to
                class index.
"""
index_list = []
classes_names = []
with get_local_path(
label_file, backend_args=self.backend_args) as local_path:
with open(local_path, 'r') as f:
reader = csv.reader(f)
for line in reader:
# self.cat2label[line[0]] = line[1]
classes_names.append(line[1])
index_list.append(line[0])
index_mapping = {index: i for i, index in enumerate(index_list)}
return classes_names, index_mapping
def _parse_img_level_ann(self,
img_level_ann_file: str) -> Dict[str, List[dict]]:
"""Parse image level annotations from csv style ann_file.
Args:
img_level_ann_file (str): CSV style image level annotation
file path.
Returns:
Dict[str, List[dict]]: Annotations where item of the defaultdict
indicates an image, each of which has (n) dicts.
Keys of dicts are:
- `image_level_label` (int): Label id.
- `confidence` (float): Labels that are human-verified to be
present in an image have confidence = 1 (positive labels).
Labels that are human-verified to be absent from an image
have confidence = 0 (negative labels). Machine-generated
labels have fractional confidences, generally >= 0.5.
The higher the confidence, the smaller the chance for
the label to be a false positive.
"""
item_lists = defaultdict(list)
with get_local_path(
img_level_ann_file,
backend_args=self.backend_args) as local_path:
with open(local_path, 'r') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
item_lists[img_id].append(
dict(
image_level_label=int(
self.label_id_mapping[line[2]]),
confidence=float(line[3])))
return item_lists
def _get_relation_matrix(self, hierarchy_file: str) -> np.ndarray:
"""Get the matrix of class hierarchy from the hierarchy file. Hierarchy
for 600 classes can be found at https://storage.googleapis.com/openimag
es/2018_04/bbox_labels_600_hierarchy_visualizer/circle.html.
Args:
hierarchy_file (str): File path to the hierarchy for classes.
Returns:
np.ndarray: The matrix of the corresponding relationship between
the parent class and the child class, of shape
(class_num, class_num).
""" # noqa
hierarchy = load(
hierarchy_file, file_format='json', backend_args=self.backend_args)
class_num = len(self._metainfo['classes'])
relation_matrix = np.eye(class_num, class_num)
relation_matrix = self._convert_hierarchy_tree(hierarchy,
relation_matrix)
return relation_matrix
def _convert_hierarchy_tree(self,
hierarchy_map: dict,
relation_matrix: np.ndarray,
parents: list = [],
get_all_parents: bool = True) -> np.ndarray:
"""Get matrix of the corresponding relationship between the parent
class and the child class.
Args:
hierarchy_map (dict): Including label name and corresponding
subcategory. Keys of dicts are:
                - `LabelName` (str): Name of the label.
- `Subcategory` (dict | list): Corresponding subcategory(ies).
relation_matrix (ndarray): The matrix of the corresponding
relationship between the parent class and the child class,
of shape (class_num, class_num).
parents (list): Corresponding parent class.
            get_all_parents (bool): Whether to get all parent names.
                Defaults to True.
Returns:
ndarray: The matrix of the corresponding relationship between
the parent class and the child class, of shape
(class_num, class_num).
"""
if 'Subcategory' in hierarchy_map:
for node in hierarchy_map['Subcategory']:
if 'LabelName' in node:
children_name = node['LabelName']
children_index = self.label_id_mapping[children_name]
children = [children_index]
else:
continue
if len(parents) > 0:
for parent_index in parents:
if get_all_parents:
children.append(parent_index)
relation_matrix[children_index, parent_index] = 1
relation_matrix = self._convert_hierarchy_tree(
node, relation_matrix, parents=children)
return relation_matrix
def _join_prefix(self):
"""Join ``self.data_root`` with annotation path."""
super()._join_prefix()
if not is_abs(self.label_file) and self.label_file:
self.label_file = osp.join(self.data_root, self.label_file)
if not is_abs(self.meta_file) and self.meta_file:
self.meta_file = osp.join(self.data_root, self.meta_file)
if not is_abs(self.hierarchy_file) and self.hierarchy_file:
self.hierarchy_file = osp.join(self.data_root, self.hierarchy_file)
if self.image_level_ann_file and not is_abs(self.image_level_ann_file):
self.image_level_ann_file = osp.join(self.data_root,
self.image_level_ann_file)
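# --- Illustrative sketch, not part of the original file ---
# A toy example of what ``_get_relation_matrix``/``_convert_hierarchy_tree``
# above produce: starting from an identity matrix, each (child, parent) pair
# found in the hierarchy is set to 1. The three-class hierarchy and its label
# ids below are hypothetical.
def _demo_relation_matrix():
    """Build the relation matrix for a toy two-level hierarchy.
    Examples:
        >>> _demo_relation_matrix().tolist()
        [[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 0.0, 1.0]]
    """
    label_id_mapping = {'Vehicle': 0, 'Car': 1, 'Bus': 2}
    hierarchy = {
        'LabelName': 'Vehicle',
        'Subcategory': [{'LabelName': 'Car'}, {'LabelName': 'Bus'}]
    }
    # Every class relates to itself, then children are linked to the parent.
    relation_matrix = np.eye(len(label_id_mapping))
    parent_index = label_id_mapping[hierarchy['LabelName']]
    for node in hierarchy['Subcategory']:
        child_index = label_id_mapping[node['LabelName']]
        relation_matrix[child_index, parent_index] = 1
    return relation_matrix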
@DATASETS.register_module()
class OpenImagesChallengeDataset(OpenImagesDataset):
"""Open Images Challenge dataset for detection.
Args:
ann_file (str): Open Images Challenge box annotation in txt format.
"""
METAINFO: dict = dict(dataset_type='oid_challenge')
def __init__(self, ann_file: str, **kwargs) -> None:
if not ann_file.endswith('txt'):
raise TypeError('The annotation file of Open Images Challenge '
'should be a txt file.')
super().__init__(ann_file=ann_file, **kwargs)
def load_data_list(self) -> List[dict]:
"""Load annotations from an annotation file named as ``self.ann_file``
Returns:
List[dict]: A list of annotation.
"""
classes_names, label_id_mapping = self._parse_label_file(
self.label_file)
self._metainfo['classes'] = classes_names
self.label_id_mapping = label_id_mapping
if self.image_level_ann_file is not None:
img_level_anns = self._parse_img_level_ann(
self.image_level_ann_file)
else:
img_level_anns = None
# OpenImagesMetric can get the relation matrix from the dataset meta
relation_matrix = self._get_relation_matrix(self.hierarchy_file)
self._metainfo['RELATION_MATRIX'] = relation_matrix
data_list = []
with get_local_path(
self.ann_file, backend_args=self.backend_args) as local_path:
with open(local_path, 'r') as f:
lines = f.readlines()
i = 0
while i < len(lines):
instances = []
filename = lines[i].rstrip()
i += 2
img_gt_size = int(lines[i])
i += 1
for j in range(img_gt_size):
sp = lines[i + j].split()
instances.append(
dict(
bbox=[
float(sp[1]),
float(sp[2]),
float(sp[3]),
float(sp[4])
],
bbox_label=int(sp[0]) - 1, # labels begin from 1
ignore_flag=0,
                                is_group_ofs=int(sp[5]) == 1))
i += img_gt_size
data_list.append(
dict(
img_path=osp.join(self.data_prefix['img'], filename),
instances=instances,
))
# add image metas to data list
img_metas = load(
self.meta_file, file_format='pkl', backend_args=self.backend_args)
assert len(img_metas) == len(data_list)
for i, meta in enumerate(img_metas):
img_id = osp.split(data_list[i]['img_path'])[-1][:-4]
assert img_id == osp.split(meta['filename'])[-1][:-4]
h, w = meta['ori_shape'][:2]
data_list[i]['height'] = h
data_list[i]['width'] = w
data_list[i]['img_id'] = img_id
# denormalize bboxes
for j in range(len(data_list[i]['instances'])):
data_list[i]['instances'][j]['bbox'][0] *= w
data_list[i]['instances'][j]['bbox'][2] *= w
data_list[i]['instances'][j]['bbox'][1] *= h
data_list[i]['instances'][j]['bbox'][3] *= h
# add image-level annotation
if img_level_anns is not None:
img_labels = []
confidences = []
img_ann_list = img_level_anns.get(img_id, [])
for ann in img_ann_list:
img_labels.append(int(ann['image_level_label']))
confidences.append(float(ann['confidence']))
data_list[i]['image_level_labels'] = np.array(
img_labels, dtype=np.int64)
data_list[i]['confidences'] = np.array(
confidences, dtype=np.float32)
return data_list
def _parse_label_file(self, label_file: str) -> tuple:
"""Get classes name and index mapping from cls-label-description file.
Args:
label_file (str): File path of the label description file that
                maps the class names in MID format to their short
                descriptions.
        Returns:
            tuple: A tuple of class names and a mapping from label MID to
                class index.
"""
label_list = []
id_list = []
index_mapping = {}
with get_local_path(
label_file, backend_args=self.backend_args) as local_path:
with open(local_path, 'r') as f:
reader = csv.reader(f)
for line in reader:
label_name = line[0]
label_id = int(line[2])
label_list.append(line[1])
id_list.append(label_id)
index_mapping[label_name] = label_id - 1
indexes = np.argsort(id_list)
classes_names = []
for index in indexes:
classes_names.append(label_list[index])
return classes_names, index_mapping
def _parse_img_level_ann(self, image_level_ann_file):
"""Parse image level annotations from csv style ann_file.
Args:
image_level_ann_file (str): CSV style image level annotation
file path.
Returns:
defaultdict[list[dict]]: Annotations where item of the defaultdict
indicates an image, each of which has (n) dicts.
Keys of dicts are:
                - `image_level_label` (int): Label id.
                - `confidence` (float): Confidence score of the label.
"""
item_lists = defaultdict(list)
with get_local_path(
image_level_ann_file,
backend_args=self.backend_args) as local_path:
with open(local_path, 'r') as f:
reader = csv.reader(f)
i = -1
for line in reader:
i += 1
if i == 0:
continue
else:
img_id = line[0]
label_id = line[1]
assert label_id in self.label_id_mapping
image_level_label = int(
self.label_id_mapping[label_id])
confidence = float(line[2])
item_lists[img_id].append(
dict(
image_level_label=image_level_label,
confidence=confidence))
return item_lists
def _get_relation_matrix(self, hierarchy_file: str) -> np.ndarray:
"""Get the matrix of class hierarchy from the hierarchy file.
Args:
hierarchy_file (str): File path to the hierarchy for classes.
Returns:
np.ndarray: The matrix of the corresponding
relationship between the parent class and the child class,
of shape (class_num, class_num).
"""
with get_local_path(
hierarchy_file, backend_args=self.backend_args) as local_path:
class_label_tree = np.load(local_path, allow_pickle=True)
return class_label_tree[1:, 1:]
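# --- Illustrative sketch, not part of the original file ---
# A hypothetical record in the Challenge txt annotation format consumed by
# ``OpenImagesChallengeDataset.load_data_list`` above: an image path, one line
# that the parser skips over (the ``i += 2``), the number of boxes, then one
# ``label x1 y1 x2 y2 is_group_of`` row per box with 1-based labels and
# normalized coordinates (denormalized later using the image metas).
_DEMO_CHALLENGE_ANN_TXT = '\n'.join([
    'validation/0001eeaf4aed83f9.jpg',
    '1',
    '2',
    '3 0.1 0.2 0.9 0.8 0',
    '5 0.0 0.0 0.5 0.5 1',
])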
| 20,079 | 40.402062 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/dataset_wrappers.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import collections
import copy
from typing import Sequence, Union
from mmengine.dataset import BaseDataset, force_full_init
from mmdet.registry import DATASETS, TRANSFORMS
@DATASETS.register_module()
class MultiImageMixDataset:
"""A wrapper of multiple images mixed dataset.
Suitable for training on multiple images mixed data augmentation like
mosaic and mixup. For the augmentation pipeline of mixed image data,
the `get_indexes` method needs to be provided to obtain the image
    indexes, and you can set `skip_type_keys` to skip selected transforms
    in the pipeline at runtime. The deprecated `dynamic_scale` parameter is
    no longer supported.
    Args:
        dataset (:obj:`CustomDataset`): The dataset to be mixed.
        pipeline (Sequence[dict]): Sequence of transform object or
            config dict to be composed.
        skip_type_keys (list[str], optional): Sequence of transform type
            strings to be skipped in the pipeline. Defaults to None.
        max_refetch (int): The maximum number of retry iterations for getting
            valid results from the pipeline. If the number of iterations
            exceeds `max_refetch` and the results are still None, the
            iteration is terminated and an error is raised. Defaults to 15.
"""
def __init__(self,
dataset: Union[BaseDataset, dict],
pipeline: Sequence[str],
skip_type_keys: Union[Sequence[str], None] = None,
max_refetch: int = 15,
lazy_init: bool = False) -> None:
assert isinstance(pipeline, collections.abc.Sequence)
if skip_type_keys is not None:
assert all([
isinstance(skip_type_key, str)
for skip_type_key in skip_type_keys
])
self._skip_type_keys = skip_type_keys
self.pipeline = []
self.pipeline_types = []
for transform in pipeline:
if isinstance(transform, dict):
self.pipeline_types.append(transform['type'])
transform = TRANSFORMS.build(transform)
self.pipeline.append(transform)
else:
                raise TypeError('each element in pipeline must be a dict')
self.dataset: BaseDataset
if isinstance(dataset, dict):
self.dataset = DATASETS.build(dataset)
elif isinstance(dataset, BaseDataset):
self.dataset = dataset
else:
raise TypeError(
'elements in datasets sequence should be config or '
f'`BaseDataset` instance, but got {type(dataset)}')
self._metainfo = self.dataset.metainfo
if hasattr(self.dataset, 'flag'):
self.flag = self.dataset.flag
self.num_samples = len(self.dataset)
self.max_refetch = max_refetch
self._fully_initialized = False
if not lazy_init:
self.full_init()
@property
def metainfo(self) -> dict:
"""Get the meta information of the multi-image-mixed dataset.
Returns:
dict: The meta information of multi-image-mixed dataset.
"""
return copy.deepcopy(self._metainfo)
def full_init(self):
"""Loop to ``full_init`` each dataset."""
if self._fully_initialized:
return
self.dataset.full_init()
self._ori_len = len(self.dataset)
self._fully_initialized = True
@force_full_init
def get_data_info(self, idx: int) -> dict:
"""Get annotation by index.
Args:
            idx (int): Global index of the wrapped dataset.
        Returns:
            dict: The idx-th annotation of the wrapped dataset.
"""
return self.dataset.get_data_info(idx)
@force_full_init
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
results = copy.deepcopy(self.dataset[idx])
for (transform, transform_type) in zip(self.pipeline,
self.pipeline_types):
if self._skip_type_keys is not None and \
transform_type in self._skip_type_keys:
continue
if hasattr(transform, 'get_indexes'):
for i in range(self.max_refetch):
# Make sure the results passed the loading pipeline
# of the original dataset is not None.
indexes = transform.get_indexes(self.dataset)
if not isinstance(indexes, collections.abc.Sequence):
indexes = [indexes]
mix_results = [
copy.deepcopy(self.dataset[index]) for index in indexes
]
if None not in mix_results:
results['mix_results'] = mix_results
break
else:
raise RuntimeError(
'The loading pipeline of the original dataset'
                        ' always returns None. Please check the correctness '
'of the dataset and its pipeline.')
for i in range(self.max_refetch):
# To confirm the results passed the training pipeline
# of the wrapper is not None.
updated_results = transform(copy.deepcopy(results))
if updated_results is not None:
results = updated_results
break
else:
raise RuntimeError(
'The training pipeline of the dataset wrapper'
                    ' always returns None. Please check the correctness '
'of the dataset and its pipeline.')
if 'mix_results' in results:
results.pop('mix_results')
return results
def update_skip_type_keys(self, skip_type_keys):
"""Update skip_type_keys. It is called by an external hook.
Args:
            skip_type_keys (list[str], optional): Sequence of transform
                type strings to be skipped in the pipeline.
"""
assert all([
isinstance(skip_type_key, str) for skip_type_key in skip_type_keys
])
self._skip_type_keys = skip_type_keys
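# --- Illustrative sketch, not part of the original file ---
# A hypothetical config showing how the wrapper above is typically used:
# transforms that implement ``get_indexes`` (e.g. Mosaic/MixUp) receive the
# extra images through ``results['mix_results']``, and ``skip_type_keys`` can
# later be switched by a hook to disable them near the end of training.
# All paths and hyper-parameters below are placeholders.
_demo_train_dataset = dict(
    type='MultiImageMixDataset',
    dataset=dict(
        type='CocoDataset',
        ann_file='data/coco/annotations/instances_train2017.json',
        data_prefix=dict(img='data/coco/train2017/'),
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
        ]),
    pipeline=[
        dict(type='Mosaic', img_scale=(640, 640)),
        dict(type='RandomFlip', prob=0.5),
        dict(type='PackDetInputs'),
    ],
    skip_type_keys=None,  # e.g. ['Mosaic'] to skip mosaic in late epochs
    max_refetch=15)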
| 6,474 | 37.088235 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/xml_style.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
from typing import List, Optional, Union
import mmcv
from mmengine.fileio import get, get_local_path, list_from_file
from mmdet.registry import DATASETS
from .base_det_dataset import BaseDetDataset
@DATASETS.register_module()
class XMLDataset(BaseDetDataset):
"""XML dataset for detection.
Args:
img_subdir (str): Subdir where images are stored. Default: JPEGImages.
ann_subdir (str): Subdir where annotations are. Default: Annotations.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
def __init__(self,
img_subdir: str = 'JPEGImages',
ann_subdir: str = 'Annotations',
**kwargs) -> None:
self.img_subdir = img_subdir
self.ann_subdir = ann_subdir
super().__init__(**kwargs)
@property
def sub_data_root(self) -> str:
"""Return the sub data root."""
return self.data_prefix.get('sub_data_root', '')
def load_data_list(self) -> List[dict]:
"""Load annotation from XML style ann_file.
Returns:
list[dict]: Annotation info from XML file.
"""
assert self._metainfo.get('classes', None) is not None, \
'`classes` in `XMLDataset` can not be None.'
self.cat2label = {
cat: i
for i, cat in enumerate(self._metainfo['classes'])
}
data_list = []
img_ids = list_from_file(self.ann_file, backend_args=self.backend_args)
for img_id in img_ids:
file_name = osp.join(self.img_subdir, f'{img_id}.jpg')
xml_path = osp.join(self.sub_data_root, self.ann_subdir,
f'{img_id}.xml')
raw_img_info = {}
raw_img_info['img_id'] = img_id
raw_img_info['file_name'] = file_name
raw_img_info['xml_path'] = xml_path
parsed_data_info = self.parse_data_info(raw_img_info)
data_list.append(parsed_data_info)
return data_list
@property
    def bbox_min_size(self) -> Optional[int]:
"""Return the minimum size of bounding boxes in the images."""
if self.filter_cfg is not None:
return self.filter_cfg.get('bbox_min_size', None)
else:
return None
def parse_data_info(self, img_info: dict) -> Union[dict, List[dict]]:
"""Parse raw annotation to target format.
Args:
img_info (dict): Raw image information, usually it includes
`img_id`, `file_name`, and `xml_path`.
Returns:
Union[dict, List[dict]]: Parsed annotation.
"""
data_info = {}
img_path = osp.join(self.sub_data_root, img_info['file_name'])
data_info['img_path'] = img_path
data_info['img_id'] = img_info['img_id']
data_info['xml_path'] = img_info['xml_path']
# deal with xml file
with get_local_path(
img_info['xml_path'],
backend_args=self.backend_args) as local_path:
raw_ann_info = ET.parse(local_path)
root = raw_ann_info.getroot()
size = root.find('size')
if size is not None:
width = int(size.find('width').text)
height = int(size.find('height').text)
else:
img_bytes = get(img_path, backend_args=self.backend_args)
img = mmcv.imfrombytes(img_bytes, backend='cv2')
height, width = img.shape[:2]
del img, img_bytes
data_info['height'] = height
data_info['width'] = width
data_info['instances'] = self._parse_instance_info(
raw_ann_info, minus_one=True)
return data_info
    def _parse_instance_info(self,
                             raw_ann_info: ET.ElementTree,
                             minus_one: bool = True) -> List[dict]:
        """Parse instance information.
Args:
raw_ann_info (ElementTree): ElementTree object.
minus_one (bool): Whether to subtract 1 from the coordinates.
Defaults to True.
Returns:
List[dict]: List of instances.
"""
instances = []
for obj in raw_ann_info.findall('object'):
instance = {}
name = obj.find('name').text
if name not in self._metainfo['classes']:
continue
difficult = obj.find('difficult')
difficult = 0 if difficult is None else int(difficult.text)
bnd_box = obj.find('bndbox')
bbox = [
int(float(bnd_box.find('xmin').text)),
int(float(bnd_box.find('ymin').text)),
int(float(bnd_box.find('xmax').text)),
int(float(bnd_box.find('ymax').text))
]
# VOC needs to subtract 1 from the coordinates
if minus_one:
bbox = [x - 1 for x in bbox]
ignore = False
if self.bbox_min_size is not None:
assert not self.test_mode
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
if w < self.bbox_min_size or h < self.bbox_min_size:
ignore = True
if difficult or ignore:
instance['ignore_flag'] = 1
else:
instance['ignore_flag'] = 0
instance['bbox'] = bbox
instance['bbox_label'] = self.cat2label[name]
instances.append(instance)
return instances
def filter_data(self) -> List[dict]:
"""Filter annotations according to filter_cfg.
Returns:
List[dict]: Filtered results.
"""
if self.test_mode:
return self.data_list
filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False) \
if self.filter_cfg is not None else False
min_size = self.filter_cfg.get('min_size', 0) \
if self.filter_cfg is not None else 0
valid_data_infos = []
for i, data_info in enumerate(self.data_list):
width = data_info['width']
height = data_info['height']
if filter_empty_gt and len(data_info['instances']) == 0:
continue
if min(width, height) >= min_size:
valid_data_infos.append(data_info)
return valid_data_infos
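# --- Illustrative sketch, not part of the original file ---
# A minimal VOC-style annotation and the bbox handling performed by
# ``_parse_instance_info`` above: read the ``bndbox`` corners and subtract 1
# when ``minus_one=True``. The class name and coordinates are hypothetical.
_DEMO_VOC_XML = (
    '<annotation>'
    '<size><width>640</width><height>480</height></size>'
    '<object>'
    '<name>person</name>'
    '<difficult>0</difficult>'
    '<bndbox><xmin>48</xmin><ymin>240</ymin>'
    '<xmax>195</xmax><ymax>371</ymax></bndbox>'
    '</object>'
    '</annotation>')
def _demo_parse_voc_bbox(xml_str=_DEMO_VOC_XML, minus_one=True):
    """Parse the toy XML above into an ``[x1, y1, x2, y2]`` bbox.
    Examples:
        >>> _demo_parse_voc_bbox()
        [47, 239, 194, 370]
    """
    root = ET.fromstring(xml_str)
    bnd_box = root.find('object').find('bndbox')
    bbox = [
        int(float(bnd_box.find('xmin').text)),
        int(float(bnd_box.find('ymin').text)),
        int(float(bnd_box.find('xmax').text)),
        int(float(bnd_box.find('ymax').text))
    ]
    # VOC coordinates are 1-based, hence the optional minus-one shift.
    return [x - 1 for x in bbox] if minus_one else bbox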
| 6,524 | 33.893048 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset'
]
| 1,325 | 46.357143 | 78 |
py
|
ERD
|
ERD-main/mmdet/datasets/lvis.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from typing import List
from mmengine.fileio import get_local_path
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class LVISV05Dataset(CocoDataset):
"""LVIS v0.5 dataset for detection."""
METAINFO = {
'classes':
('acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',
'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',
'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron',
'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke',
'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award',
'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack',
'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball',
'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage',
'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel',
'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat',
'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop',
'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel',
'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball',
'bead', 'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed',
'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle',
'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle',
'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder',
'binoculars', 'bird', 'birdfeeder', 'birdbath', 'birdcage',
'birdhouse', 'birthday_cake', 'birthday_card', 'biscuit_(bread)',
'pirate_flag', 'black_sheep', 'blackboard', 'blanket', 'blazer',
'blender', 'blimp', 'blinker', 'blueberry', 'boar', 'gameboard',
'boat', 'bobbin', 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt',
'bolt', 'bonnet', 'book', 'book_bag', 'bookcase', 'booklet',
'bookmark', 'boom_microphone', 'boot', 'bottle', 'bottle_opener',
'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie',
'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'bowling_pin',
'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase',
'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie',
'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull',
'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board',
'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed',
'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife',
'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder',
'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon',
'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap',
'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)',
'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan',
'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag',
'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast',
'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player',
'celery', 'cellular_telephone', 'chain_mail', 'chair',
'chaise_longue', 'champagne', 'chandelier', 'chap', 'checkbook',
'checkerboard', 'cherry', 'chessboard',
'chest_of_drawers_(furniture)', 'chicken_(animal)', 'chicken_wire',
'chickpea', 'Chihuahua', 'chili_(vegetable)', 'chime', 'chinaware',
'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar',
'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker',
'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider',
'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet',
'clasp', 'cleansing_agent', 'clementine', 'clip', 'clipboard',
'clock', 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag',
'coaster', 'coat', 'coat_hanger', 'coatrack', 'cock', 'coconut',
'coffee_filter', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil',
'coin', 'colander', 'coleslaw', 'coloring_material',
'combination_lock', 'pacifier', 'comic_book', 'computer_keyboard',
'concrete_mixer', 'cone', 'control', 'convertible_(automobile)',
'sofa_bed', 'cookie', 'cookie_jar', 'cooking_utensil',
'cooler_(for_food)', 'cork_(bottle_plug)', 'corkboard', 'corkscrew',
'edible_corn', 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset',
'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell',
'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon',
'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot',
'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship',
'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube',
'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler',
'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool',
'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard',
'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table',
'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog',
'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask',
'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
'dresser', 'drill', 'drinking_fountain', 'drone', 'dropper',
'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',
'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan',
'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel',
'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)',
'fire_alarm', 'fire_engine', 'fire_extinguisher', 'fire_hose',
'fireplace', 'fireplug', 'fish', 'fish_(food)', 'fishbowl',
'fishing_boat', 'fishing_rod', 'flag', 'flagpole', 'flamingo',
'flannel', 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)',
'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal',
'folding_chair', 'food_processor', 'football_(American)',
'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car',
'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice',
'fruit_salad', 'frying_pan', 'fudge', 'funnel', 'futon', 'gag',
'garbage', 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle',
'garlic', 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda',
'gift_wrap', 'ginger', 'giraffe', 'cincture',
'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater',
'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',
'grillroom', 'grinder_(tool)', 'grits', 'grizzly', 'grocery_bag',
'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush',
'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock',
'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil',
'headband', 'headboard', 'headlight', 'headscarf', 'headset',
'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater',
'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus',
'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood',
'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod',
'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean',
'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick',
'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard',
'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten',
'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)',
'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat',
'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp',
'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer',
'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)',
'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy',
'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine',
'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard',
'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion',
'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine',
'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth',
'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini',
'mascot', 'mashed_potato', 'masher', 'mask', 'mast',
'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup',
'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone',
'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan',
'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money',
'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle',
'mound_(baseball)', 'mouse_(animal_rodent)',
'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
'music_stool', 'musical_instrument', 'nailfile', 'nameplate',
'napkin', 'neckerchief', 'necklace', 'necktie', 'needle', 'nest',
'newsstand', 'nightshirt', 'nosebag_(for_animals)',
'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker',
'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',
'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'oregano',
'ostrich', 'ottoman', 'overalls_(clothing)', 'owl', 'packet',
'inkpad', 'pad', 'paddle', 'padlock', 'paintbox', 'paintbrush',
'painting', 'pajamas', 'palette', 'pan_(for_cooking)',
'pan_(metal_container)', 'pancake', 'pantyhose', 'papaya',
'paperclip', 'paper_plate', 'paper_towel', 'paperback_book',
'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',
'parchment', 'parka', 'parking_meter', 'parrot',
'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard',
'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener',
'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper',
'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood',
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
'plate', 'platter', 'playing_card', 'playpen', 'pliers',
'plow_(farm_equipment)', 'pocket_watch', 'pocketknife',
'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt',
'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait',
'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot',
'potato', 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn',
'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune',
'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher',
'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit',
'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish',
'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
'recliner', 'record_player', 'red_cabbage', 'reflector',
'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring',
'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate',
'Rollerblade', 'rolling_pin', 'root_beer',
'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)',
'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag',
'safety_pin', 'sail', 'salad', 'salad_plate', 'salami',
'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker',
'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer',
'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)',
'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard',
'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver',
'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker',
'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)',
'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog',
'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag',
'shopping_cart', 'short_pants', 'shot_glass', 'shoulder_bag',
'shovel', 'shower_head', 'shower_curtain', 'shredder_(for_paper)',
'sieve', 'signboard', 'silo', 'sink', 'skateboard', 'skewer', 'ski',
'ski_boot', 'ski_parka', 'ski_pole', 'skirt', 'sled', 'sleeping_bag',
'sling_(bandage)', 'slipper_(footwear)', 'smoothie', 'snake',
'snowboard', 'snowman', 'snowmobile', 'soap', 'soccer_ball', 'sock',
'soda_fountain', 'carbonated_water', 'sofa', 'softball',
'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',
'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',
'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'sponge',
'spoon', 'sportswear', 'spotlight', 'squirrel',
'stapler_(stapling_machine)', 'starfish', 'statue_(sculpture)',
'steak_(food)', 'steak_knife', 'steamer_(kitchen_appliance)',
'steering_wheel', 'stencil', 'stepladder', 'step_stool',
'stereo_(sound_system)', 'stew', 'stirrer', 'stirrup',
'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light', 'stove',
'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',
'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',
'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',
'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop',
'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato',
'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table',
'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag',
'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)',
'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
'telephone_pole', 'telephoto_lens', 'television_camera',
'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer',
'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster',
'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 'tongs',
'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover',
'tortilla', 'tow_truck', 'towel', 'towel_rack', 'toy',
'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike',
'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', 'tray',
'tree_house', 'trench_coat', 'triangle_(musical_instrument)',
'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)',
'trunk', 'vat', 'turban', 'turkey_(bird)', 'turkey_(food)', 'turnip',
'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella',
'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'valve',
'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin',
'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon',
'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet',
'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch',
'water_bottle', 'water_cooler', 'water_faucet', 'water_filter',
'water_heater', 'water_jug', 'water_gun', 'water_scooter',
'water_ski', 'water_tower', 'watering_can', 'watermelon',
'weathervane', 'webcam', 'wedding_cake', 'wedding_ring', 'wet_suit',
'wheel', 'wheelchair', 'whipped_cream', 'whiskey', 'whistle', 'wick',
'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',
'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',
'wineglass', 'wing_chair', 'blinder_(for_horses)', 'wok', 'wolf',
'wooden_spoon', 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht',
'yak', 'yogurt', 'yoke_(animal_equipment)', 'zebra', 'zucchini'),
'palette':
None
}
def load_data_list(self) -> List[dict]:
"""Load annotations from an annotation file named as ``self.ann_file``
Returns:
List[dict]: A list of annotation.
""" # noqa: E501
try:
import lvis
if getattr(lvis, '__version__', '0') >= '10.5.3':
warnings.warn(
'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501
UserWarning)
from lvis import LVIS
except ImportError:
raise ImportError(
'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501
)
with get_local_path(
self.ann_file, backend_args=self.backend_args) as local_path:
self.lvis = LVIS(local_path)
self.cat_ids = self.lvis.get_cat_ids()
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.cat_img_map = copy.deepcopy(self.lvis.cat_img_map)
img_ids = self.lvis.get_img_ids()
data_list = []
total_ann_ids = []
for img_id in img_ids:
raw_img_info = self.lvis.load_imgs([img_id])[0]
raw_img_info['img_id'] = img_id
if raw_img_info['file_name'].startswith('COCO'):
# Convert form the COCO 2014 file naming convention of
# COCO_[train/val/test]2014_000000000000.jpg to the 2017
# naming convention of 000000000000.jpg
# (LVIS v1 will fix this naming issue)
raw_img_info['file_name'] = raw_img_info['file_name'][-16:]
ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])
raw_ann_info = self.lvis.load_anns(ann_ids)
total_ann_ids.extend(ann_ids)
parsed_data_info = self.parse_data_info({
'raw_ann_info':
raw_ann_info,
'raw_img_info':
raw_img_info
})
data_list.append(parsed_data_info)
if self.ANN_ID_UNIQUE:
assert len(set(total_ann_ids)) == len(
total_ann_ids
), f"Annotation ids in '{self.ann_file}' are not unique!"
del self.lvis
return data_list
LVISDataset = LVISV05Dataset
DATASETS.register_module(name='LVISDataset', module=LVISDataset)
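# --- Illustrative sketch, not part of the original file ---
# ``load_data_list`` above keeps only the last 16 characters of LVIS v0.5
# file names so that the COCO 2014 naming convention maps onto the 2017
# layout. A tiny standalone demonstration of that slice:
def _demo_coco2014_to_2017_name(file_name):
    """Strip the ``COCO_<split>2014_`` prefix from an LVIS v0.5 file name.
    Examples:
        >>> _demo_coco2014_to_2017_name('COCO_val2014_000000397133.jpg')
        '000000397133.jpg'
    """
    return file_name[-16:] if file_name.startswith('COCO') else file_name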
@DATASETS.register_module()
class LVISV1Dataset(LVISDataset):
"""LVIS v1 dataset for detection."""
METAINFO = {
'classes':
('aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',
'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',
'antenna', 'apple', 'applesauce', 'apricot', 'apron', 'aquarium',
'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',
'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',
'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',
'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',
'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',
'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',
'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',
'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',
'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',
'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',
'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',
'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',
'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',
'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',
'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',
'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',
'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',
'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',
'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',
'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',
'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',
'bottle_opener', 'bouquet', 'bow_(weapon)',
'bow_(decorative_ribbons)', 'bow-tie', 'bowl', 'pipe_bowl',
'bowler_hat', 'bowling_ball', 'box', 'boxing_glove', 'suspenders',
'bracelet', 'brass_plaque', 'brassiere', 'bread-bin', 'bread',
'breechcloth', 'bridal_gown', 'briefcase', 'broccoli', 'broach',
'broom', 'brownie', 'brussels_sprouts', 'bubble_gum', 'bucket',
'horse_buggy', 'bull', 'bulldog', 'bulldozer', 'bullet_train',
'bulletin_board', 'bulletproof_vest', 'bullhorn', 'bun', 'bunk_bed',
'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butter',
'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', 'cabinet',
'locker', 'cake', 'calculator', 'calendar', 'calf', 'camcorder',
'camel', 'camera', 'camera_lens', 'camper_(vehicle)', 'can',
'can_opener', 'candle', 'candle_holder', 'candy_bar', 'candy_cane',
'walking_cane', 'canister', 'canoe', 'cantaloup', 'canteen',
'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',
'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',
'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',
'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',
'cash_register', 'casserole', 'cassette', 'cast', 'cat',
'cauliflower', 'cayenne_(spice)', 'CD_player', 'celery',
'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue',
'chalice', 'chandelier', 'chap', 'checkbook', 'checkerboard',
'cherry', 'chessboard', 'chicken_(animal)', 'chickpea',
'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)',
'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk',
'chocolate_mousse', 'choker', 'chopping_board', 'chopstick',
'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette',
'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent',
'cleat_(for_securing_rope)', 'clementine', 'clip', 'clipboard',
'clippers_(for_plants)', 'cloak', 'clock', 'clock_tower',
'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat',
'coat_hanger', 'coatrack', 'cock', 'cockroach', 'cocoa_(beverage)',
'coconut', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil',
'coin', 'colander', 'coleslaw', 'coloring_material',
'combination_lock', 'pacifier', 'comic_book', 'compass',
'computer_keyboard', 'condiment', 'cone', 'control',
'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',
'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',
'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',
'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',
'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',
'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',
'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',
'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',
'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',
'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',
'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',
'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table',
'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',
'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',
'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove',
'dragonfly', 'drawer', 'underdrawers', 'dress', 'dress_hat',
'dress_suit', 'dresser', 'drill', 'drone', 'dropper',
'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',
'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', 'eagle',
'earphone', 'earplug', 'earring', 'easel', 'eclair', 'eel', 'egg',
'egg_roll', 'egg_yolk', 'eggbeater', 'eggplant', 'electric_chair',
'refrigerator', 'elephant', 'elk', 'envelope', 'eraser', 'escargot',
'eyepatch', 'falcon', 'fan', 'faucet', 'fedora', 'ferret',
'Ferris_wheel', 'ferry', 'fig_(fruit)', 'fighter_jet', 'figurine',
'file_cabinet', 'file_(tool)', 'fire_alarm', 'fire_engine',
'fire_extinguisher', 'fire_hose', 'fireplace', 'fireplug',
'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', 'fishing_rod',
'flag', 'flagpole', 'flamingo', 'flannel', 'flap', 'flash',
'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)',
'flower_arrangement', 'flute_glass', 'foal', 'folding_chair',
'food_processor', 'football_(American)', 'football_helmet',
'footstool', 'fork', 'forklift', 'freight_car', 'French_toast',
'freshener', 'frisbee', 'frog', 'fruit_juice', 'frying_pan', 'fudge',
'funnel', 'futon', 'gag', 'garbage', 'garbage_truck', 'garden_hose',
'gargle', 'gargoyle', 'garlic', 'gasmask', 'gazelle', 'gelatin',
'gemstone', 'generator', 'giant_panda', 'gift_wrap', 'ginger',
'giraffe', 'cincture', 'glass_(drink_container)', 'globe', 'glove',
'goat', 'goggles', 'goldfish', 'golf_club', 'golfcart',
'gondola_(boat)', 'goose', 'gorilla', 'gourd', 'grape', 'grater',
'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',
'grill', 'grits', 'grizzly', 'grocery_bag', 'guitar', 'gull', 'gun',
'hairbrush', 'hairnet', 'hairpin', 'halter_top', 'ham', 'hamburger',
'hammer', 'hammock', 'hamper', 'hamster', 'hair_dryer', 'hand_glass',
'hand_towel', 'handcart', 'handcuff', 'handkerchief', 'handle',
'handsaw', 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil',
'headband', 'headboard', 'headlight', 'headscarf', 'headset',
'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',
'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',
'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',
'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',
'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',
'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',
'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',
'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',
'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',
'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',
'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',
'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',
'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade',
'lettuce', 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',
'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',
'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat',
'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',
'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange',
'manger', 'manhole', 'map', 'marker', 'martini', 'mascot',
'mashed_potato', 'masher', 'mask', 'mast', 'mat_(gym_equipment)',
'matchbox', 'mattress', 'measuring_cup', 'measuring_stick',
'meatball', 'medicine', 'melon', 'microphone', 'microscope',
'microwave_oven', 'milestone', 'milk', 'milk_can', 'milkshake',
'minivan', 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)',
'money', 'monitor_(computer_equipment) computer_monitor', 'monkey',
'motor', 'motor_scooter', 'motor_vehicle', 'motorcycle',
'mound_(baseball)', 'mouse_(computer_equipment)', 'mousepad',
'muffin', 'mug', 'mushroom', 'music_stool', 'musical_instrument',
'nailfile', 'napkin', 'neckerchief', 'necklace', 'necktie', 'needle',
'nest', 'newspaper', 'newsstand', 'nightshirt',
'nosebag_(for_animals)', 'noseband_(for_animals)', 'notebook',
'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)',
'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion',
'orange_(fruit)', 'orange_juice', 'ostrich', 'ottoman', 'oven',
'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle',
'padlock', 'paintbrush', 'painting', 'pajamas', 'palette',
'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose',
'papaya', 'paper_plate', 'paper_towel', 'paperback_book',
'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',
'parasol', 'parchment', 'parka', 'parking_meter', 'parrot',
'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',
'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',
'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',
'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',
'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',
'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',
'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot',
'potato', 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn',
'pretzel', 'printer', 'projectile_(weapon)', 'projector', 'propeller',
'prune', 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin',
'puncher', 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt',
'rabbit', 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver',
'radish', 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry',
'rat', 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
'recliner', 'record_player', 'reflector', 'remote_control',
'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',
'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',
'rolling_pin', 'root_beer', 'router_(computer_equipment)',
'rubber_band', 'runner_(carpet)', 'plastic_bag',
'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',
'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',
'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',
'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',
'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',
'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',
'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',
'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',
'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',
'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',
'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',
'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',
'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',
'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',
'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',
'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',
'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',
'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',
'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',
'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',
'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',
'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew',
'stirrer', 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove',
'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',
'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',
'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',
'sunglasses', 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants',
'sweatband', 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit',
'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',
'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',
'tambourine', 'army_tank', 'tank_(storage_vessel)',
'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
'telephone_pole', 'telephoto_lens', 'television_camera',
'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer',
'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster',
'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 'tongs',
'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover',
'tortilla', 'tow_truck', 'towel', 'towel_rack', 'toy',
'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike',
'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', 'tray',
'trench_coat', 'triangle_(musical_instrument)', 'tricycle', 'tripod',
'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', 'turban',
'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',
'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',
'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',
'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',
'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',
'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',
'washbasin', 'automatic_washer', 'watch', 'water_bottle',
'water_cooler', 'water_faucet', 'water_heater', 'water_jug',
'water_gun', 'water_scooter', 'water_ski', 'water_tower',
'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',
'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',
'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',
'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',
'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',
'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',
'yoke_(animal_equipment)', 'zebra', 'zucchini'),
'palette':
None
}
def load_data_list(self) -> List[dict]:
"""Load annotations from an annotation file named as ``self.ann_file``
Returns:
List[dict]: A list of annotation.
""" # noqa: E501
try:
import lvis
if getattr(lvis, '__version__', '0') >= '10.5.3':
warnings.warn(
'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501
UserWarning)
from lvis import LVIS
except ImportError:
raise ImportError(
'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501
)
with get_local_path(
self.ann_file, backend_args=self.backend_args) as local_path:
self.lvis = LVIS(local_path)
self.cat_ids = self.lvis.get_cat_ids()
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.cat_img_map = copy.deepcopy(self.lvis.cat_img_map)
img_ids = self.lvis.get_img_ids()
data_list = []
total_ann_ids = []
for img_id in img_ids:
raw_img_info = self.lvis.load_imgs([img_id])[0]
raw_img_info['img_id'] = img_id
# coco_url is used in LVISv1 instead of file_name
# e.g. http://images.cocodataset.org/train2017/000000391895.jpg
            # the train/val split is specified in the url
raw_img_info['file_name'] = raw_img_info['coco_url'].replace(
'http://images.cocodataset.org/', '')
ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])
raw_ann_info = self.lvis.load_anns(ann_ids)
total_ann_ids.extend(ann_ids)
parsed_data_info = self.parse_data_info({
'raw_ann_info':
raw_ann_info,
'raw_img_info':
raw_img_info
})
data_list.append(parsed_data_info)
if self.ANN_ID_UNIQUE:
assert len(set(total_ann_ids)) == len(
total_ann_ids
), f"Annotation ids in '{self.ann_file}' are not unique!"
del self.lvis
return data_list
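# A small illustration of the coco_url -> file_name conversion performed in
# `load_data_list` above; the URL is the example already quoted in the code
# comments, not a value read from any annotation file.
# >>> url = 'http://images.cocodataset.org/train2017/000000391895.jpg'
# >>> url.replace('http://images.cocodataset.org/', '')
# 'train2017/000000391895.jpg'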
| 41,457 | 63.879499 | 157 |
py
|
ERD
|
ERD-main/mmdet/datasets/objects365.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
from typing import List
from mmengine.fileio import get_local_path
from mmdet.registry import DATASETS
from .api_wrappers import COCO
from .coco import CocoDataset
# images that exist in the annotations but not in the image folder.
objv2_ignore_list = [
osp.join('patch16', 'objects365_v2_00908726.jpg'),
osp.join('patch6', 'objects365_v1_00320532.jpg'),
osp.join('patch6', 'objects365_v1_00320534.jpg'),
]
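# A minimal sketch (illustrative only) of how entries in `objv2_ignore_list`
# are matched: `Objects365V2Dataset.load_data_list` rebuilds each file name as
# `patchX/xxx.jpg` before the membership test. The raw path below is a
# hypothetical example, not a path taken from the dataset.
# >>> raw = 'images/v2/patch16/objects365_v2_00908726.jpg'
# >>> osp.join(osp.split(osp.split(raw)[0])[-1], osp.split(raw)[-1])
# 'patch16/objects365_v2_00908726.jpg'  # found in objv2_ignore_list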
@DATASETS.register_module()
class Objects365V1Dataset(CocoDataset):
"""Objects365 v1 dataset for detection."""
METAINFO = {
'classes':
('person', 'sneakers', 'chair', 'hat', 'lamp', 'bottle',
'cabinet/shelf', 'cup', 'car', 'glasses', 'picture/frame', 'desk',
'handbag', 'street lights', 'book', 'plate', 'helmet',
'leather shoes', 'pillow', 'glove', 'potted plant', 'bracelet',
'flower', 'tv', 'storage box', 'vase', 'bench', 'wine glass', 'boots',
'bowl', 'dining table', 'umbrella', 'boat', 'flag', 'speaker',
'trash bin/can', 'stool', 'backpack', 'couch', 'belt', 'carpet',
'basket', 'towel/napkin', 'slippers', 'barrel/bucket', 'coffee table',
'suv', 'toy', 'tie', 'bed', 'traffic light', 'pen/pencil',
'microphone', 'sandals', 'canned', 'necklace', 'mirror', 'faucet',
'bicycle', 'bread', 'high heels', 'ring', 'van', 'watch', 'sink',
'horse', 'fish', 'apple', 'camera', 'candle', 'teddy bear', 'cake',
'motorcycle', 'wild bird', 'laptop', 'knife', 'traffic sign',
'cell phone', 'paddle', 'truck', 'cow', 'power outlet', 'clock',
'drum', 'fork', 'bus', 'hanger', 'nightstand', 'pot/pan', 'sheep',
'guitar', 'traffic cone', 'tea pot', 'keyboard', 'tripod', 'hockey',
'fan', 'dog', 'spoon', 'blackboard/whiteboard', 'balloon',
'air conditioner', 'cymbal', 'mouse', 'telephone', 'pickup truck',
'orange', 'banana', 'airplane', 'luggage', 'skis', 'soccer',
'trolley', 'oven', 'remote', 'baseball glove', 'paper towel',
'refrigerator', 'train', 'tomato', 'machinery vehicle', 'tent',
'shampoo/shower gel', 'head phone', 'lantern', 'donut',
'cleaning products', 'sailboat', 'tangerine', 'pizza', 'kite',
'computer box', 'elephant', 'toiletries', 'gas stove', 'broccoli',
'toilet', 'stroller', 'shovel', 'baseball bat', 'microwave',
'skateboard', 'surfboard', 'surveillance camera', 'gun', 'life saver',
'cat', 'lemon', 'liquid soap', 'zebra', 'duck', 'sports car',
'giraffe', 'pumpkin', 'piano', 'stop sign', 'radiator', 'converter',
'tissue ', 'carrot', 'washing machine', 'vent', 'cookies',
'cutting/chopping board', 'tennis racket', 'candy',
'skating and skiing shoes', 'scissors', 'folder', 'baseball',
'strawberry', 'bow tie', 'pigeon', 'pepper', 'coffee machine',
'bathtub', 'snowboard', 'suitcase', 'grapes', 'ladder', 'pear',
'american football', 'basketball', 'potato', 'paint brush', 'printer',
'billiards', 'fire hydrant', 'goose', 'projector', 'sausage',
'fire extinguisher', 'extension cord', 'facial mask', 'tennis ball',
'chopsticks', 'electronic stove and gas stove', 'pie', 'frisbee',
'kettle', 'hamburger', 'golf club', 'cucumber', 'clutch', 'blender',
'tong', 'slide', 'hot dog', 'toothbrush', 'facial cleanser', 'mango',
'deer', 'egg', 'violin', 'marker', 'ship', 'chicken', 'onion',
'ice cream', 'tape', 'wheelchair', 'plum', 'bar soap', 'scale',
'watermelon', 'cabbage', 'router/modem', 'golf ball', 'pine apple',
'crane', 'fire truck', 'peach', 'cello', 'notepaper', 'tricycle',
'toaster', 'helicopter', 'green beans', 'brush', 'carriage', 'cigar',
'earphone', 'penguin', 'hurdle', 'swing', 'radio', 'CD',
'parking meter', 'swan', 'garlic', 'french fries', 'horn', 'avocado',
'saxophone', 'trumpet', 'sandwich', 'cue', 'kiwi fruit', 'bear',
'fishing rod', 'cherry', 'tablet', 'green vegetables', 'nuts', 'corn',
'key', 'screwdriver', 'globe', 'broom', 'pliers', 'volleyball',
'hammer', 'eggplant', 'trophy', 'dates', 'board eraser', 'rice',
'tape measure/ruler', 'dumbbell', 'hamimelon', 'stapler', 'camel',
'lettuce', 'goldfish', 'meat balls', 'medal', 'toothpaste',
'antelope', 'shrimp', 'rickshaw', 'trombone', 'pomegranate',
'coconut', 'jellyfish', 'mushroom', 'calculator', 'treadmill',
'butterfly', 'egg tart', 'cheese', 'pig', 'pomelo', 'race car',
'rice cooker', 'tuba', 'crosswalk sign', 'papaya', 'hair drier',
'green onion', 'chips', 'dolphin', 'sushi', 'urinal', 'donkey',
'electric drill', 'spring rolls', 'tortoise/turtle', 'parrot',
'flute', 'measuring cup', 'shark', 'steak', 'poker card',
'binoculars', 'llama', 'radish', 'noodles', 'yak', 'mop', 'crab',
'microscope', 'barbell', 'bread/bun', 'baozi', 'lion', 'red cabbage',
'polar bear', 'lighter', 'seal', 'mangosteen', 'comb', 'eraser',
'pitaya', 'scallop', 'pencil case', 'saw', 'table tennis paddle',
'okra', 'starfish', 'eagle', 'monkey', 'durian', 'game board',
'rabbit', 'french horn', 'ambulance', 'asparagus', 'hoverboard',
'pasta', 'target', 'hotair balloon', 'chainsaw', 'lobster', 'iron',
'flashlight'),
'palette':
None
}
COCOAPI = COCO
# ann_id is unique in coco dataset.
ANN_ID_UNIQUE = True
def load_data_list(self) -> List[dict]:
"""Load annotations from an annotation file named as ``self.ann_file``
Returns:
List[dict]: A list of annotation.
""" # noqa: E501
with get_local_path(
self.ann_file, backend_args=self.backend_args) as local_path:
self.coco = self.COCOAPI(local_path)
# 'categories' list in objects365_train.json and objects365_val.json
        # is inconsistent, so sort the list (or dict) before getting cat_ids.
cats = self.coco.cats
sorted_cats = {i: cats[i] for i in sorted(cats)}
self.coco.cats = sorted_cats
categories = self.coco.dataset['categories']
sorted_categories = sorted(categories, key=lambda i: i['id'])
self.coco.dataset['categories'] = sorted_categories
# The order of returned `cat_ids` will not
# change with the order of the `classes`
self.cat_ids = self.coco.get_cat_ids(
cat_names=self.metainfo['classes'])
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.cat_img_map = copy.deepcopy(self.coco.cat_img_map)
img_ids = self.coco.get_img_ids()
data_list = []
total_ann_ids = []
for img_id in img_ids:
raw_img_info = self.coco.load_imgs([img_id])[0]
raw_img_info['img_id'] = img_id
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
raw_ann_info = self.coco.load_anns(ann_ids)
total_ann_ids.extend(ann_ids)
parsed_data_info = self.parse_data_info({
'raw_ann_info':
raw_ann_info,
'raw_img_info':
raw_img_info
})
data_list.append(parsed_data_info)
if self.ANN_ID_UNIQUE:
assert len(set(total_ann_ids)) == len(
total_ann_ids
), f"Annotation ids in '{self.ann_file}' are not unique!"
del self.coco
return data_list
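# A minimal illustration of the category sorting done in
# `Objects365V1Dataset.load_data_list` above; the category ids and names are
# made up for the example.
# >>> cats = {3: {'id': 3, 'name': 'chair'}, 1: {'id': 1, 'name': 'person'}}
# >>> {i: cats[i] for i in sorted(cats)}
# {1: {'id': 1, 'name': 'person'}, 3: {'id': 3, 'name': 'chair'}}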
@DATASETS.register_module()
class Objects365V2Dataset(CocoDataset):
"""Objects365 v2 dataset for detection."""
METAINFO = {
'classes':
('Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp',
'Glasses', 'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf',
'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet',
'Book', 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower',
'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', 'Pillow', 'Boots',
'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt',
'Moniter/TV', 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker',
'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', 'Stool',
'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Bakset', 'Drum',
'Pen/Pencil', 'Bus', 'Wild Bird', 'High Heels', 'Motorcycle',
'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned',
'Truck', 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel',
'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', 'Bed',
'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple',
'Air Conditioner', 'Knife', 'Hockey Stick', 'Paddle', 'Pickup Truck',
'Fork', 'Traffic Sign', 'Ballon', 'Tripod', 'Dog', 'Spoon', 'Clock',
'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger',
'Blackboard/Whiteboard', 'Napkin', 'Other Fish', 'Orange/Tangerine',
'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle',
'Fan', 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane',
'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', 'Luggage',
'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone',
'Sports Car', 'Stop Sign', 'Dessert', 'Scooter', 'Stroller', 'Crane',
'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat',
'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza',
'Elephant', 'Skateboard', 'Surfboard', 'Gun',
'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot',
'Toilet', 'Kite', 'Strawberry', 'Other Balls', 'Shovel', 'Pepper',
'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks',
'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board',
'Coffee Table', 'Side Table', 'Scissors', 'Marker', 'Pie', 'Ladder',
'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball',
'Zebra', 'Grape', 'Giraffe', 'Potato', 'Sausage', 'Tricycle',
'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck',
'Billards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club',
'Briefcase', 'Cucumber', 'Cigar/Cigarette ', 'Paint Brush', 'Pear',
'Heavy Truck', 'Hamburger', 'Extractor', 'Extention Cord', 'Tong',
'Tennis Racket', 'Folder', 'American Football', 'earphone', 'Mask',
'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', 'Slide',
'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee',
'Washing Machine/Drying Machine', 'Chicken', 'Printer', 'Watermelon',
'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hotair ballon',
'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog',
'Blender', 'Peach', 'Rice', 'Wallet/Purse', 'Volleyball', 'Deer',
'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple',
'Golf Ball', 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle',
'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', 'Megaphone',
'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion',
'Sandwich', 'Nuts', 'Speed Limit Sign', 'Induction Cooker', 'Broom',
'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',
'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese',
'Notepaper', 'Cherry', 'Pliers', 'CD', 'Pasta', 'Hammer', 'Cue',
'Avocado', 'Hamimelon', 'Flask', 'Mushroon', 'Screwdriver', 'Soap',
'Recorder', 'Bear', 'Eggplant', 'Board Eraser', 'Coconut',
'Tape Measur/ Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', 'Steak',
'Crosswalk Sign', 'Stapler', 'Campel', 'Formula 1 ', 'Pomegranate',
'Dishwasher', 'Crab', 'Hoverboard', 'Meat ball', 'Rice Cooker',
'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal',
'Buttefly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin',
'Electric Drill', 'Hair Dryer', 'Egg tart', 'Jellyfish', 'Treadmill',
'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi',
'Target', 'French', 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case',
'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', 'Scallop',
'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Teniis paddle',
'Cosmetics Brush/Eyeliner Pencil', 'Chainsaw', 'Eraser', 'Lobster',
'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling',
'Table Tennis '),
'palette':
None
}
COCOAPI = COCO
# ann_id is unique in coco dataset.
ANN_ID_UNIQUE = True
def load_data_list(self) -> List[dict]:
"""Load annotations from an annotation file named as ``self.ann_file``
Returns:
List[dict]: A list of annotation.
""" # noqa: E501
with get_local_path(
self.ann_file, backend_args=self.backend_args) as local_path:
self.coco = self.COCOAPI(local_path)
# The order of returned `cat_ids` will not
# change with the order of the `classes`
self.cat_ids = self.coco.get_cat_ids(
cat_names=self.metainfo['classes'])
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.cat_img_map = copy.deepcopy(self.coco.cat_img_map)
img_ids = self.coco.get_img_ids()
data_list = []
total_ann_ids = []
for img_id in img_ids:
raw_img_info = self.coco.load_imgs([img_id])[0]
raw_img_info['img_id'] = img_id
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
raw_ann_info = self.coco.load_anns(ann_ids)
total_ann_ids.extend(ann_ids)
# file_name should be `patchX/xxx.jpg`
file_name = osp.join(
osp.split(osp.split(raw_img_info['file_name'])[0])[-1],
osp.split(raw_img_info['file_name'])[-1])
if file_name in objv2_ignore_list:
continue
raw_img_info['file_name'] = file_name
parsed_data_info = self.parse_data_info({
'raw_ann_info':
raw_ann_info,
'raw_img_info':
raw_img_info
})
data_list.append(parsed_data_info)
if self.ANN_ID_UNIQUE:
assert len(set(total_ann_ids)) == len(
total_ann_ids
), f"Annotation ids in '{self.ann_file}' are not unique!"
del self.coco
return data_list
| 14,792 | 50.905263 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/coco_panoptic.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Callable, List, Optional, Sequence, Union
from mmdet.registry import DATASETS
from .api_wrappers import COCOPanoptic
from .coco import CocoDataset
@DATASETS.register_module()
class CocoPanopticDataset(CocoDataset):
"""Coco dataset for Panoptic segmentation.
The annotation format is shown as follows. The `ann` field is optional
for testing.
.. code-block:: none
[
{
'filename': f'{image_id:012}.png',
'image_id':9
'segments_info':
[
{
'id': 8345037, (segment_id in panoptic png,
convert from rgb)
'category_id': 51,
'iscrowd': 0,
'bbox': (x1, y1, w, h),
'area': 24315
},
...
]
},
...
]
Args:
ann_file (str): Annotation file path. Defaults to ''.
metainfo (dict, optional): Meta information for dataset, such as class
information. Defaults to None.
data_root (str, optional): The root directory for ``data_prefix`` and
``ann_file``. Defaults to None.
data_prefix (dict, optional): Prefix for training data. Defaults to
``dict(img=None, ann=None, seg=None)``. The prefix ``seg`` which is
            for the panoptic segmentation map must not be None.
filter_cfg (dict, optional): Config for filter data. Defaults to None.
        indices (int or Sequence[int], optional): Support using only the
            first few data entries in the annotation file to facilitate
            training/testing on a smaller dataset. Defaults to None, which
            means using all ``data_infos``.
serialize_data (bool, optional): Whether to hold memory using
serialized objects, when enabled, data loader workers can use
shared RAM from master process instead of making a copy. Defaults
to True.
pipeline (list, optional): Processing pipeline. Defaults to [].
test_mode (bool, optional): ``test_mode=True`` means in test phase.
Defaults to False.
        lazy_init (bool, optional): Whether to load annotation during
            instantiation. In some cases, such as visualization, only the
            meta information of the dataset is needed, which makes it
            unnecessary to load the annotation file. ``Basedataset`` can
            skip loading annotations to save time by setting
            ``lazy_init=True``. Defaults to False.
        max_refetch (int, optional): If ``Basedataset.prepare_data`` gets a
            None img, the maximum number of extra cycles to run in order to
            get a valid image. Defaults to 1000.
"""
METAINFO = {
'classes':
('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',
'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff',
'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light',
'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
'wall-wood', 'water-other', 'window-blind', 'window-other',
'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
'cabinet-merged', 'table-merged', 'floor-other-merged',
'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
'paper-merged', 'food-other-merged', 'building-other-merged',
'rock-merged', 'wall-other-merged', 'rug-merged'),
'thing_classes':
('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'),
'stuff_classes':
('banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain',
'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house',
'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
'wall-wood', 'water-other', 'window-blind', 'window-other',
'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
'cabinet-merged', 'table-merged', 'floor-other-merged',
'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
'paper-merged', 'food-other-merged', 'building-other-merged',
'rock-merged', 'wall-other-merged', 'rug-merged'),
'palette':
[(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228),
(0, 60, 100), (0, 80, 100), (0, 0, 70), (0, 0, 192), (250, 170, 30),
(100, 170, 30), (220, 220, 0), (175, 116, 175), (250, 0, 30),
(165, 42, 42), (255, 77, 255), (0, 226, 252), (182, 182, 255),
(0, 82, 0), (120, 166, 157), (110, 76, 0), (174, 57, 255),
(199, 100, 0), (72, 0, 118), (255, 179, 240), (0, 125, 92),
(209, 0, 151), (188, 208, 182), (0, 220, 176), (255, 99, 164),
(92, 0, 73), (133, 129, 255), (78, 180, 255), (0, 228, 0),
(174, 255, 243), (45, 89, 255), (134, 134, 103), (145, 148, 174),
(255, 208, 186), (197, 226, 255), (171, 134, 1), (109, 63, 54),
(207, 138, 255), (151, 0, 95), (9, 80, 61), (84, 105, 51),
(74, 65, 105), (166, 196, 102), (208, 195, 210), (255, 109, 65),
(0, 143, 149), (179, 0, 194), (209, 99, 106), (5, 121, 0),
(227, 255, 205), (147, 186, 208), (153, 69, 1), (3, 95, 161),
(163, 255, 0), (119, 0, 170), (0, 182, 199), (0, 165, 120),
(183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133),
(166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62),
(65, 70, 15), (127, 167, 115), (59, 105, 106), (142, 108, 45),
(196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1),
(246, 0, 122), (191, 162, 208), (255, 255, 128), (147, 211, 203),
(150, 100, 100), (168, 171, 172), (146, 112, 198), (210, 170, 100),
(92, 136, 89), (218, 88, 184), (241, 129, 0), (217, 17, 255),
(124, 74, 181), (70, 70, 70), (255, 228, 255), (154, 208, 0),
(193, 0, 92), (76, 91, 113), (255, 180, 195), (106, 154, 176),
(230, 150, 140), (60, 143, 255), (128, 64, 128), (92, 82, 55),
(254, 212, 124), (73, 77, 174), (255, 160, 98), (255, 255, 255),
(104, 84, 109), (169, 164, 131), (225, 199, 255), (137, 54, 74),
(135, 158, 223), (7, 246, 231), (107, 255, 200), (58, 41, 149),
(183, 121, 142), (255, 73, 97), (107, 142, 35), (190, 153, 153),
(146, 139, 141), (70, 130, 180), (134, 199, 156), (209, 226, 140),
(96, 36, 108), (96, 96, 96), (64, 170, 64), (152, 251, 152),
(208, 229, 228), (206, 186, 171), (152, 161, 64), (116, 112, 0),
(0, 114, 143), (102, 102, 156), (250, 141, 255)]
}
COCOAPI = COCOPanoptic
# ann_id is not unique in coco panoptic dataset.
ANN_ID_UNIQUE = False
def __init__(self,
ann_file: str = '',
metainfo: Optional[dict] = None,
data_root: Optional[str] = None,
data_prefix: dict = dict(img=None, ann=None, seg=None),
filter_cfg: Optional[dict] = None,
indices: Optional[Union[int, Sequence[int]]] = None,
serialize_data: bool = True,
pipeline: List[Union[dict, Callable]] = [],
test_mode: bool = False,
lazy_init: bool = False,
max_refetch: int = 1000,
backend_args: dict = None,
**kwargs) -> None:
super().__init__(
ann_file=ann_file,
metainfo=metainfo,
data_root=data_root,
data_prefix=data_prefix,
filter_cfg=filter_cfg,
indices=indices,
serialize_data=serialize_data,
pipeline=pipeline,
test_mode=test_mode,
lazy_init=lazy_init,
max_refetch=max_refetch,
backend_args=backend_args,
**kwargs)
def parse_data_info(self, raw_data_info: dict) -> dict:
"""Parse raw annotation to target format.
Args:
raw_data_info (dict): Raw data information load from ``ann_file``.
Returns:
dict: Parsed annotation.
"""
img_info = raw_data_info['raw_img_info']
ann_info = raw_data_info['raw_ann_info']
        # filter out unmatched annotations which have the
        # same segment_id but belong to other images
ann_info = [
ann for ann in ann_info if ann['image_id'] == img_info['img_id']
]
data_info = {}
img_path = osp.join(self.data_prefix['img'], img_info['file_name'])
if self.data_prefix.get('seg', None):
seg_map_path = osp.join(
self.data_prefix['seg'],
img_info['file_name'].replace('jpg', 'png'))
else:
seg_map_path = None
data_info['img_path'] = img_path
data_info['img_id'] = img_info['img_id']
data_info['seg_map_path'] = seg_map_path
data_info['height'] = img_info['height']
data_info['width'] = img_info['width']
instances = []
segments_info = []
for ann in ann_info:
instance = {}
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
bbox = [x1, y1, x1 + w, y1 + h]
category_id = ann['category_id']
contiguous_cat_id = self.cat2label[category_id]
is_thing = self.coco.load_cats(ids=category_id)[0]['isthing']
if is_thing:
is_crowd = ann.get('iscrowd', False)
instance['bbox'] = bbox
instance['bbox_label'] = contiguous_cat_id
if not is_crowd:
instance['ignore_flag'] = 0
else:
instance['ignore_flag'] = 1
is_thing = False
segment_info = {
'id': ann['id'],
'category': contiguous_cat_id,
'is_thing': is_thing
}
segments_info.append(segment_info)
if len(instance) > 0 and is_thing:
instances.append(instance)
data_info['instances'] = instances
data_info['segments_info'] = segments_info
return data_info
def filter_data(self) -> List[dict]:
"""Filter images too small or without ground truth.
Returns:
List[dict]: ``self.data_list`` after filtering.
"""
if self.test_mode:
return self.data_list
if self.filter_cfg is None:
return self.data_list
filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)
min_size = self.filter_cfg.get('min_size', 0)
ids_with_ann = set()
# check whether images have legal thing annotations.
for data_info in self.data_list:
for segment_info in data_info['segments_info']:
if not segment_info['is_thing']:
continue
ids_with_ann.add(data_info['img_id'])
valid_data_list = []
for data_info in self.data_list:
img_id = data_info['img_id']
width = data_info['width']
height = data_info['height']
if filter_empty_gt and img_id not in ids_with_ann:
continue
if min(width, height) >= min_size:
valid_data_list.append(data_info)
return valid_data_list
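# A schematic (with hypothetical values) of what `parse_data_info` above
# produces for one thing and one stuff segment: only thing annotations become
# `instances`, while both kinds are recorded in `segments_info`.
# instances     -> [{'bbox': [x1, y1, x2, y2], 'bbox_label': 0, 'ignore_flag': 0}]
# segments_info -> [{'id': 8345037, 'category': 0, 'is_thing': True},
#                   {'id': 7364832, 'category': 93, 'is_thing': False}]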
| 13,726 | 46.663194 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/coco.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
from typing import List, Union
from mmengine.fileio import get_local_path
from mmdet.registry import DATASETS
from .api_wrappers import COCO
from .base_det_dataset import BaseDetDataset
@DATASETS.register_module()
class CocoDataset(BaseDetDataset):
"""Dataset for COCO."""
METAINFO = {
'classes':
('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'),
# palette is a list of color tuples, which is used for visualization.
'palette':
[(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228),
(0, 60, 100), (0, 80, 100), (0, 0, 70), (0, 0, 192), (250, 170, 30),
(100, 170, 30), (220, 220, 0), (175, 116, 175), (250, 0, 30),
(165, 42, 42), (255, 77, 255), (0, 226, 252), (182, 182, 255),
(0, 82, 0), (120, 166, 157), (110, 76, 0), (174, 57, 255),
(199, 100, 0), (72, 0, 118), (255, 179, 240), (0, 125, 92),
(209, 0, 151), (188, 208, 182), (0, 220, 176), (255, 99, 164),
(92, 0, 73), (133, 129, 255), (78, 180, 255), (0, 228, 0),
(174, 255, 243), (45, 89, 255), (134, 134, 103), (145, 148, 174),
(255, 208, 186), (197, 226, 255), (171, 134, 1), (109, 63, 54),
(207, 138, 255), (151, 0, 95), (9, 80, 61), (84, 105, 51),
(74, 65, 105), (166, 196, 102), (208, 195, 210), (255, 109, 65),
(0, 143, 149), (179, 0, 194), (209, 99, 106), (5, 121, 0),
(227, 255, 205), (147, 186, 208), (153, 69, 1), (3, 95, 161),
(163, 255, 0), (119, 0, 170), (0, 182, 199), (0, 165, 120),
(183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133),
(166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62),
(65, 70, 15), (127, 167, 115), (59, 105, 106), (142, 108, 45),
(196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1),
(246, 0, 122), (191, 162, 208)]
}
COCOAPI = COCO
# ann_id is unique in coco dataset.
ANN_ID_UNIQUE = True
def load_data_list(self) -> List[dict]:
"""Load annotations from an annotation file named as ``self.ann_file``
Returns:
List[dict]: A list of annotation.
""" # noqa: E501
with get_local_path(
self.ann_file, backend_args=self.backend_args) as local_path:
self.coco = self.COCOAPI(local_path)
# The order of returned `cat_ids` will not
# change with the order of the `classes`
self.cat_ids = self.coco.get_cat_ids(
cat_names=self.metainfo['classes'])
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.cat_img_map = copy.deepcopy(self.coco.cat_img_map)
img_ids = self.coco.get_img_ids()
data_list = []
total_ann_ids = []
for img_id in img_ids:
raw_img_info = self.coco.load_imgs([img_id])[0]
raw_img_info['img_id'] = img_id
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
raw_ann_info = self.coco.load_anns(ann_ids)
total_ann_ids.extend(ann_ids)
parsed_data_info = self.parse_data_info({
'raw_ann_info':
raw_ann_info,
'raw_img_info':
raw_img_info
})
data_list.append(parsed_data_info)
if self.ANN_ID_UNIQUE:
assert len(set(total_ann_ids)) == len(
total_ann_ids
), f"Annotation ids in '{self.ann_file}' are not unique!"
del self.coco
return data_list
def parse_data_info(self, raw_data_info: dict) -> Union[dict, List[dict]]:
"""Parse raw annotation to target format.
Args:
raw_data_info (dict): Raw data information load from ``ann_file``
Returns:
Union[dict, List[dict]]: Parsed annotation.
"""
img_info = raw_data_info['raw_img_info']
ann_info = raw_data_info['raw_ann_info']
data_info = {}
# TODO: need to change data_prefix['img'] to data_prefix['img_path']
img_path = osp.join(self.data_prefix['img'], img_info['file_name'])
if self.data_prefix.get('seg', None):
seg_map_path = osp.join(
self.data_prefix['seg'],
img_info['file_name'].rsplit('.', 1)[0] + self.seg_map_suffix)
else:
seg_map_path = None
data_info['img_path'] = img_path
data_info['img_id'] = img_info['img_id']
data_info['seg_map_path'] = seg_map_path
data_info['height'] = img_info['height']
data_info['width'] = img_info['width']
instances = []
for i, ann in enumerate(ann_info):
instance = {}
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
instance['ignore_flag'] = 1
else:
instance['ignore_flag'] = 0
instance['bbox'] = bbox
instance['bbox_label'] = self.cat2label[ann['category_id']]
if ann.get('segmentation', None):
instance['mask'] = ann['segmentation']
instances.append(instance)
data_info['instances'] = instances
return data_info
def filter_data(self) -> List[dict]:
"""Filter annotations according to filter_cfg.
Returns:
List[dict]: Filtered results.
"""
if self.test_mode:
return self.data_list
if self.filter_cfg is None:
return self.data_list
filter_empty_gt = self.filter_cfg.get('filter_empty_gt', False)
min_size = self.filter_cfg.get('min_size', 0)
# obtain images that contain annotation
ids_with_ann = set(data_info['img_id'] for data_info in self.data_list)
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_data_infos = []
for i, data_info in enumerate(self.data_list):
img_id = data_info['img_id']
width = data_info['width']
height = data_info['height']
if filter_empty_gt and img_id not in ids_in_cat:
continue
if min(width, height) >= min_size:
valid_data_infos.append(data_info)
return valid_data_infos
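# A minimal illustration of the bbox validity check used in `parse_data_info`
# above; the box and image size are made-up numbers. A box whose intersection
# with the image is empty is skipped.
# >>> x1, y1, w, h, img_w, img_h = -5.0, 10.0, 4.0, 20.0, 640, 480
# >>> inter_w = max(0, min(x1 + w, img_w) - max(x1, 0))
# >>> inter_h = max(0, min(y1 + h, img_h) - max(y1, 0))
# >>> inter_w * inter_h == 0  # True -> this annotation would be dropped
# True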
| 8,050 | 39.86802 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/wider_face.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
from mmengine.dist import is_main_process
from mmengine.fileio import get_local_path, list_from_file
from mmengine.utils import ProgressBar
from mmdet.registry import DATASETS
from mmdet.utils.typing_utils import List, Union
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
METAINFO = {'classes': ('face', ), 'palette': [(0, 255, 0)]}
def load_data_list(self) -> List[dict]:
"""Load annotation from XML style ann_file.
Returns:
list[dict]: Annotation info from XML file.
"""
assert self._metainfo.get('classes', None) is not None, \
'classes in `XMLDataset` can not be None.'
self.cat2label = {
cat: i
for i, cat in enumerate(self._metainfo['classes'])
}
data_list = []
img_ids = list_from_file(self.ann_file, backend_args=self.backend_args)
# loading process takes around 10 mins
if is_main_process():
prog_bar = ProgressBar(len(img_ids))
for img_id in img_ids:
raw_img_info = {}
raw_img_info['img_id'] = img_id
raw_img_info['file_name'] = f'{img_id}.jpg'
parsed_data_info = self.parse_data_info(raw_img_info)
data_list.append(parsed_data_info)
if is_main_process():
prog_bar.update()
return data_list
def parse_data_info(self, img_info: dict) -> Union[dict, List[dict]]:
"""Parse raw annotation to target format.
Args:
img_info (dict): Raw image information, usually it includes
`img_id`, `file_name`, and `xml_path`.
Returns:
Union[dict, List[dict]]: Parsed annotation.
"""
data_info = {}
img_id = img_info['img_id']
xml_path = osp.join(self.data_prefix['img'], 'Annotations',
f'{img_id}.xml')
data_info['img_id'] = img_id
data_info['xml_path'] = xml_path
# deal with xml file
with get_local_path(
xml_path, backend_args=self.backend_args) as local_path:
raw_ann_info = ET.parse(local_path)
root = raw_ann_info.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
img_path = osp.join(self.data_prefix['img'], folder,
img_info['file_name'])
data_info['img_path'] = img_path
data_info['height'] = height
data_info['width'] = width
# Coordinates are in range [0, width - 1 or height - 1]
data_info['instances'] = self._parse_instance_info(
raw_ann_info, minus_one=False)
return data_info
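# A schematic of the paths assembled in `parse_data_info` above (names are
# placeholders): the XML annotation is looked up under an `Annotations`
# folder, and the image under the folder recorded inside the XML itself.
#   xml: {data_prefix['img']}/Annotations/{img_id}.xml
#   img: {data_prefix['img']}/{folder from xml}/{img_id}.jpg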
| 3,103 | 33.10989 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/api_wrappers/coco_api.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# This file adds snake case aliases for the coco api
import warnings
from collections import defaultdict
from typing import List, Optional, Union
import pycocotools
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval as _COCOeval
class COCO(_COCO):
"""This class is almost the same as official pycocotools package.
    It implements some snake case function aliases so that the COCO class has
    the same interface as the LVIS class.
"""
def __init__(self, annotation_file=None):
if getattr(pycocotools, '__version__', '0') >= '12.0.2':
warnings.warn(
'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', # noqa: E501
UserWarning)
super().__init__(annotation_file=annotation_file)
self.img_ann_map = self.imgToAnns
self.cat_img_map = self.catToImgs
def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)
def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
return self.getCatIds(cat_names, sup_names, cat_ids)
def get_img_ids(self, img_ids=[], cat_ids=[]):
return self.getImgIds(img_ids, cat_ids)
def load_anns(self, ids):
return self.loadAnns(ids)
def load_cats(self, ids):
return self.loadCats(ids)
def load_imgs(self, ids):
return self.loadImgs(ids)
# just for the ease of import
COCOeval = _COCOeval
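# A minimal usage sketch of the snake_case aliases defined above; the
# annotation path is a placeholder, not a file shipped with this repository.
# >>> coco = COCO('instances_val2017.json')
# >>> img_ids = coco.get_img_ids()
# >>> ann_ids = coco.get_ann_ids(img_ids=img_ids[:1])
# >>> anns = coco.load_anns(ann_ids)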
class COCOPanoptic(COCO):
"""This wrapper is for loading the panoptic style annotation file.
The format is shown in the CocoPanopticDataset class.
Args:
annotation_file (str, optional): Path of annotation file.
Defaults to None.
"""
def __init__(self, annotation_file: Optional[str] = None) -> None:
super(COCOPanoptic, self).__init__(annotation_file)
def createIndex(self) -> None:
"""Create index."""
# create index
print('creating index...')
# anns stores 'segment_id -> annotation'
anns, cats, imgs = {}, {}, {}
img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
for seg_ann in ann['segments_info']:
# to match with instance.json
seg_ann['image_id'] = ann['image_id']
img_to_anns[ann['image_id']].append(seg_ann)
                    # segment_id is not unique in the coco dataset:
                    # annotations from different images
                    # may have the same segment_id
if seg_ann['id'] in anns.keys():
anns[seg_ann['id']].append(seg_ann)
else:
anns[seg_ann['id']] = [seg_ann]
# filter out annotations from other images
img_to_anns_ = defaultdict(list)
for k, v in img_to_anns.items():
img_to_anns_[k] = [x for x in v if x['image_id'] == k]
img_to_anns = img_to_anns_
if 'images' in self.dataset:
for img_info in self.dataset['images']:
img_info['segm_file'] = img_info['file_name'].replace(
'jpg', 'png')
imgs[img_info['id']] = img_info
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
for seg_ann in ann['segments_info']:
cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])
print('index created!')
self.anns = anns
self.imgToAnns = img_to_anns
self.catToImgs = cat_to_imgs
self.imgs = imgs
self.cats = cats
def load_anns(self,
ids: Union[List[int], int] = []) -> Optional[List[dict]]:
"""Load anns with the specified ids.
``self.anns`` is a list of annotation lists instead of a
list of annotations.
Args:
ids (Union[List[int], int]): Integer ids specifying anns.
Returns:
anns (List[dict], optional): Loaded ann objects.
"""
anns = []
if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):
# self.anns is a list of annotation lists instead of
# a list of annotations
for id in ids:
anns += self.anns[id]
return anns
elif type(ids) == int:
return self.anns[ids]
| 4,791 | 33.724638 | 126 |
py
|
ERD
|
ERD-main/mmdet/datasets/api_wrappers/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval, COCOPanoptic
__all__ = ['COCO', 'COCOeval', 'COCOPanoptic']
| 147 | 28.6 | 50 |
py
|
ERD
|
ERD-main/mmdet/datasets/samplers/batch_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
from torch.utils.data import BatchSampler, Sampler
from mmdet.registry import DATA_SAMPLERS
# TODO: maybe replace with a data_loader wrapper
@DATA_SAMPLERS.register_module()
class AspectRatioBatchSampler(BatchSampler):
"""A sampler wrapper for grouping images with similar aspect ratio (< 1 or.
>= 1) into a same batch.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``.
"""
def __init__(self,
sampler: Sampler,
batch_size: int,
drop_last: bool = False) -> None:
if not isinstance(sampler, Sampler):
raise TypeError('sampler should be an instance of ``Sampler``, '
f'but got {sampler}')
if not isinstance(batch_size, int) or batch_size <= 0:
raise ValueError('batch_size should be a positive integer value, '
f'but got batch_size={batch_size}')
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
# two groups for w < h and w >= h
self._aspect_ratio_buckets = [[] for _ in range(2)]
def __iter__(self) -> Sequence[int]:
for idx in self.sampler:
data_info = self.sampler.dataset.get_data_info(idx)
width, height = data_info['width'], data_info['height']
bucket_id = 0 if width < height else 1
bucket = self._aspect_ratio_buckets[bucket_id]
bucket.append(idx)
# yield a batch of indices in the same aspect ratio group
if len(bucket) == self.batch_size:
yield bucket[:]
del bucket[:]
        # yield the remaining data and reset the buckets
left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[
1]
self._aspect_ratio_buckets = [[] for _ in range(2)]
while len(left_data) > 0:
if len(left_data) <= self.batch_size:
if not self.drop_last:
yield left_data[:]
left_data = []
else:
yield left_data[:self.batch_size]
left_data = left_data[self.batch_size:]
def __len__(self) -> int:
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
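# A small, self-contained illustration of the bucketing rule used by
# `AspectRatioBatchSampler` (indices with width < height and width >= height
# are batched separately); the image sizes below are invented for the example
# and the helper itself is illustrative only.
def _aspect_ratio_bucket_example():
    sizes = [(640, 480), (480, 640), (800, 600), (600, 800)]  # (width, height)
    buckets = [[], []]
    for idx, (w, h) in enumerate(sizes):
        buckets[0 if w < h else 1].append(idx)
    return buckets  # -> [[1, 3], [0, 2]]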
| 2,637 | 37.231884 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/samplers/multi_source_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Iterator, List, Optional, Sized, Union
import numpy as np
import torch
from mmengine.dataset import BaseDataset
from mmengine.dist import get_dist_info, sync_random_seed
from torch.utils.data import Sampler
from mmdet.registry import DATA_SAMPLERS
@DATA_SAMPLERS.register_module()
class MultiSourceSampler(Sampler):
r"""Multi-Source Infinite Sampler.
According to the sampling ratio, sample data from different
datasets to form batches.
Args:
dataset (Sized): The dataset.
batch_size (int): Size of mini-batch.
source_ratio (list[int | float]): The sampling ratio of different
source datasets in a mini-batch.
        shuffle (bool): Whether to shuffle the dataset or not.
            Defaults to True.
seed (int, optional): Random seed. If None, set a random seed.
Defaults to None.
Examples:
>>> dataset_type = 'ConcatDataset'
>>> sub_dataset_type = 'CocoDataset'
>>> data_root = 'data/coco/'
        >>> sup_ann = '../coco_semi_annos/instances_train2017.1@10.json'
>>> unsup_ann = '../coco_semi_annos/' \
        >>>     'instances_train2017.1@10-unlabeled.json'
>>> dataset = dict(type=dataset_type,
>>> datasets=[
>>> dict(
>>> type=sub_dataset_type,
>>> data_root=data_root,
>>> ann_file=sup_ann,
>>> data_prefix=dict(img='train2017/'),
>>> filter_cfg=dict(filter_empty_gt=True, min_size=32),
>>> pipeline=sup_pipeline),
>>> dict(
>>> type=sub_dataset_type,
>>> data_root=data_root,
>>> ann_file=unsup_ann,
>>> data_prefix=dict(img='train2017/'),
>>> filter_cfg=dict(filter_empty_gt=True, min_size=32),
>>> pipeline=unsup_pipeline),
>>> ])
>>> train_dataloader = dict(
>>> batch_size=5,
>>> num_workers=5,
>>> persistent_workers=True,
>>> sampler=dict(type='MultiSourceSampler',
>>> batch_size=5, source_ratio=[1, 4]),
>>> batch_sampler=None,
>>> dataset=dataset)
"""
def __init__(self,
dataset: Sized,
batch_size: int,
source_ratio: List[Union[int, float]],
shuffle: bool = True,
seed: Optional[int] = None) -> None:
assert hasattr(dataset, 'cumulative_sizes'),\
f'The dataset must be ConcatDataset, but get {dataset}'
assert isinstance(batch_size, int) and batch_size > 0, \
'batch_size must be a positive integer value, ' \
f'but got batch_size={batch_size}'
assert isinstance(source_ratio, list), \
f'source_ratio must be a list, but got source_ratio={source_ratio}'
assert len(source_ratio) == len(dataset.cumulative_sizes), \
'The length of source_ratio must be equal to ' \
f'the number of datasets, but got source_ratio={source_ratio}'
rank, world_size = get_dist_info()
self.rank = rank
self.world_size = world_size
self.dataset = dataset
self.cumulative_sizes = [0] + dataset.cumulative_sizes
self.batch_size = batch_size
self.source_ratio = source_ratio
self.num_per_source = [
int(batch_size * sr / sum(source_ratio)) for sr in source_ratio
]
self.num_per_source[0] = batch_size - sum(self.num_per_source[1:])
assert sum(self.num_per_source) == batch_size, \
'The sum of num_per_source must be equal to ' \
f'batch_size, but get {self.num_per_source}'
self.seed = sync_random_seed() if seed is None else seed
self.shuffle = shuffle
self.source2inds = {
source: self._indices_of_rank(len(ds))
for source, ds in enumerate(dataset.datasets)
}
def _infinite_indices(self, sample_size: int) -> Iterator[int]:
"""Infinitely yield a sequence of indices."""
g = torch.Generator()
g.manual_seed(self.seed)
while True:
if self.shuffle:
yield from torch.randperm(sample_size, generator=g).tolist()
else:
yield from torch.arange(sample_size).tolist()
def _indices_of_rank(self, sample_size: int) -> Iterator[int]:
"""Slice the infinite indices by rank."""
yield from itertools.islice(
self._infinite_indices(sample_size), self.rank, None,
self.world_size)
def __iter__(self) -> Iterator[int]:
batch_buffer = []
while True:
for source, num in enumerate(self.num_per_source):
batch_buffer_per_source = []
for idx in self.source2inds[source]:
idx += self.cumulative_sizes[source]
batch_buffer_per_source.append(idx)
if len(batch_buffer_per_source) == num:
batch_buffer += batch_buffer_per_source
break
yield from batch_buffer
batch_buffer = []
def __len__(self) -> int:
return len(self.dataset)
def set_epoch(self, epoch: int) -> None:
"""Not supported in `epoch-based runner."""
pass
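# A minimal illustration of how `num_per_source` is derived in
# `MultiSourceSampler.__init__`; batch_size and source_ratio follow the
# docstring example above, and the helper itself is illustrative only.
def _num_per_source_example(batch_size=5, source_ratio=(1, 4)):
    num = [int(batch_size * sr / sum(source_ratio)) for sr in source_ratio]
    num[0] = batch_size - sum(num[1:])  # first source absorbs rounding error
    return num  # -> [1, 4]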
@DATA_SAMPLERS.register_module()
class GroupMultiSourceSampler(MultiSourceSampler):
r"""Group Multi-Source Infinite Sampler.
According to the sampling ratio, sample data from different
datasets but the same group to form batches.
Args:
dataset (Sized): The dataset.
batch_size (int): Size of mini-batch.
source_ratio (list[int | float]): The sampling ratio of different
source datasets in a mini-batch.
        shuffle (bool): Whether to shuffle the dataset or not.
            Defaults to True.
seed (int, optional): Random seed. If None, set a random seed.
Defaults to None.
"""
def __init__(self,
dataset: BaseDataset,
batch_size: int,
source_ratio: List[Union[int, float]],
shuffle: bool = True,
seed: Optional[int] = None) -> None:
super().__init__(
dataset=dataset,
batch_size=batch_size,
source_ratio=source_ratio,
shuffle=shuffle,
seed=seed)
self._get_source_group_info()
self.group_source2inds = [{
source:
self._indices_of_rank(self.group2size_per_source[source][group])
for source in range(len(dataset.datasets))
} for group in range(len(self.group_ratio))]
def _get_source_group_info(self) -> None:
self.group2size_per_source = [{0: 0, 1: 0}, {0: 0, 1: 0}]
self.group2inds_per_source = [{0: [], 1: []}, {0: [], 1: []}]
for source, dataset in enumerate(self.dataset.datasets):
for idx in range(len(dataset)):
data_info = dataset.get_data_info(idx)
width, height = data_info['width'], data_info['height']
group = 0 if width < height else 1
self.group2size_per_source[source][group] += 1
self.group2inds_per_source[source][group].append(idx)
self.group_sizes = np.zeros(2, dtype=np.int64)
for group2size in self.group2size_per_source:
for group, size in group2size.items():
self.group_sizes[group] += size
self.group_ratio = self.group_sizes / sum(self.group_sizes)
def __iter__(self) -> Iterator[int]:
batch_buffer = []
while True:
group = np.random.choice(
list(range(len(self.group_ratio))), p=self.group_ratio)
for source, num in enumerate(self.num_per_source):
batch_buffer_per_source = []
for idx in self.group_source2inds[group][source]:
idx = self.group2inds_per_source[source][group][
idx] + self.cumulative_sizes[source]
batch_buffer_per_source.append(idx)
if len(batch_buffer_per_source) == num:
batch_buffer += batch_buffer_per_source
break
yield from batch_buffer
batch_buffer = []
| 8,580 | 38.911628 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/samplers/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import AspectRatioBatchSampler
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler'
]
| 347 | 33.8 | 77 |
py
|
ERD
|
ERD-main/mmdet/datasets/samplers/class_aware_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Dict, Iterator, Optional, Union
import numpy as np
import torch
from mmengine.dataset import BaseDataset
from mmengine.dist import get_dist_info, sync_random_seed
from torch.utils.data import Sampler
from mmdet.registry import DATA_SAMPLERS
@DATA_SAMPLERS.register_module()
class ClassAwareSampler(Sampler):
r"""Sampler that restricts data loading to the label of the dataset.
A class-aware sampling strategy to effectively tackle the
non-uniform class distribution. The length of the training data is
consistent with source data. Simple improvements based on `Relay
Backpropagation for Effective Learning of Deep Convolutional
Neural Networks <https://arxiv.org/abs/1512.05830>`_
    The implementation is adapted from
https://github.com/Sense-X/TSD/blob/master/mmdet/datasets/samplers/distributed_classaware_sampler.py
Args:
dataset: Dataset used for sampling.
seed (int, optional): random seed used to shuffle the sampler.
This number should be identical across all
processes in the distributed group. Defaults to None.
num_sample_class (int): The number of samples taken from each
per-label list. Defaults to 1.
"""
def __init__(self,
dataset: BaseDataset,
seed: Optional[int] = None,
num_sample_class: int = 1) -> None:
rank, world_size = get_dist_info()
self.rank = rank
self.world_size = world_size
self.dataset = dataset
self.epoch = 0
# Must be the same across all workers. If None, will use a
# random seed shared among workers
        # (requires synchronization among all workers)
if seed is None:
seed = sync_random_seed()
self.seed = seed
# The number of samples taken from each per-label list
assert num_sample_class > 0 and isinstance(num_sample_class, int)
self.num_sample_class = num_sample_class
# Get per-label image list from dataset
self.cat_dict = self.get_cat2imgs()
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / world_size))
self.total_size = self.num_samples * self.world_size
# get number of images containing each category
self.num_cat_imgs = [len(x) for x in self.cat_dict.values()]
# filter labels without images
self.valid_cat_inds = [
i for i, length in enumerate(self.num_cat_imgs) if length != 0
]
self.num_classes = len(self.valid_cat_inds)
def get_cat2imgs(self) -> Dict[int, list]:
"""Get a dict with class as key and img_ids as values.
Returns:
dict[int, list]: A dict of per-label image list,
the item of the dict indicates a label index,
corresponds to the image index that contains the label.
"""
classes = self.dataset.metainfo.get('classes', None)
if classes is None:
raise ValueError('dataset metainfo must contain `classes`')
# sort the label index
cat2imgs = {i: [] for i in range(len(classes))}
for i in range(len(self.dataset)):
cat_ids = set(self.dataset.get_cat_ids(i))
for cat in cat_ids:
cat2imgs[cat].append(i)
return cat2imgs
def __iter__(self) -> Iterator[int]:
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
# initialize label list
label_iter_list = RandomCycleIter(self.valid_cat_inds, generator=g)
# initialize each per-label image list
data_iter_dict = dict()
for i in self.valid_cat_inds:
data_iter_dict[i] = RandomCycleIter(self.cat_dict[i], generator=g)
def gen_cat_img_inds(cls_list, data_dict, num_sample_cls):
"""Traverse the categories and extract `num_sample_cls` image
indexes of the corresponding categories one by one."""
id_indices = []
for _ in range(len(cls_list)):
cls_idx = next(cls_list)
for _ in range(num_sample_cls):
id = next(data_dict[cls_idx])
id_indices.append(id)
return id_indices
# deterministically shuffle based on epoch
num_bins = int(
math.ceil(self.total_size * 1.0 / self.num_classes /
self.num_sample_class))
indices = []
for i in range(num_bins):
indices += gen_cat_img_inds(label_iter_list, data_iter_dict,
self.num_sample_class)
# fix extra samples to make it evenly divisible
if len(indices) >= self.total_size:
indices = indices[:self.total_size]
else:
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self) -> int:
"""The number of samples in this rank."""
return self.num_samples
def set_epoch(self, epoch: int) -> None:
"""Sets the epoch for this sampler.
When :attr:`shuffle=True`, this ensures all replicas use a different
random ordering for each epoch. Otherwise, the next iteration of this
sampler will yield the same ordering.
Args:
epoch (int): Epoch number.
"""
self.epoch = epoch
class RandomCycleIter:
"""Shuffle the list and do it again after the list have traversed.
    The implementation logic follows
https://github.com/wutong16/DistributionBalancedLoss/blob/master/mllt/datasets/loader/sampler.py
Example:
>>> label_list = [0, 1, 2, 4, 5]
>>> g = torch.Generator()
>>> g.manual_seed(0)
>>> label_iter_list = RandomCycleIter(label_list, generator=g)
>>> index = next(label_iter_list)
Args:
data (list or ndarray): The data that needs to be shuffled.
        generator: A torch.Generator object, which is used to set the seed
            for generating random numbers.
""" # noqa: W605
def __init__(self,
data: Union[list, np.ndarray],
generator: torch.Generator = None) -> None:
self.data = data
self.length = len(data)
self.index = torch.randperm(self.length, generator=generator).numpy()
self.i = 0
self.generator = generator
def __iter__(self) -> Iterator:
return self
def __len__(self) -> int:
return len(self.data)
def __next__(self):
if self.i == self.length:
self.index = torch.randperm(
self.length, generator=self.generator).numpy()
self.i = 0
idx = self.data[self.index[self.i]]
self.i += 1
return idx
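def _demo_class_aware_round_robin():
    # Hedged sketch added for illustration only; it is not part of the original
    # sampler. It shows the core idea of ClassAwareSampler: cycle over class
    # ids and, for each visited class, draw one image index from that class's
    # own cycler. The ``cat2imgs`` mapping below is hypothetical.
    import torch
    g = torch.Generator()
    g.manual_seed(0)
    cat2imgs = {0: [0, 3], 1: [1], 2: [2, 4, 5]}
    label_iter = RandomCycleIter(list(cat2imgs), generator=g)
    data_iters = {
        c: RandomCycleIter(v, generator=g)
        for c, v in cat2imgs.items()
    }
    return [next(data_iters[next(label_iter)]) for _ in range(6)]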
| 7,132 | 35.958549 | 104 |
py
|
ERD
|
ERD-main/mmdet/datasets/transforms/formatting.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmcv.transforms import to_tensor
from mmcv.transforms.base import BaseTransform
from mmengine.structures import InstanceData, PixelData
from mmdet.registry import TRANSFORMS
from mmdet.structures import DetDataSample
from mmdet.structures.bbox import BaseBoxes
@TRANSFORMS.register_module()
class PackDetInputs(BaseTransform):
"""Pack the inputs data for the detection / semantic segmentation /
panoptic segmentation.
The ``img_meta`` item is always populated. The contents of the
    ``img_meta`` dictionary depend on ``meta_keys``. By default this includes:
- ``img_id``: id of the image
- ``img_path``: path to the image file
- ``ori_shape``: original shape of the image as a tuple (h, w)
- ``img_shape``: shape of the image input to the network as a tuple \
(h, w). Note that images may be zero padded on the \
bottom/right if the batch tensor is larger than this shape.
- ``scale_factor``: a float indicating the preprocessing scale
- ``flip``: a boolean indicating if image flip transform was used
- ``flip_direction``: the flipping direction
Args:
meta_keys (Sequence[str], optional): Meta keys to be converted to
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction')``
"""
mapping_table = {
'gt_bboxes': 'bboxes',
'gt_bboxes_labels': 'labels',
'gt_masks': 'masks'
}
def __init__(self,
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction')):
self.meta_keys = meta_keys
def transform(self, results: dict) -> dict:
"""Method to pack the input data.
Args:
results (dict): Result dict from the data pipeline.
Returns:
dict:
- 'inputs' (obj:`torch.Tensor`): The forward data of models.
- 'data_sample' (obj:`DetDataSample`): The annotation info of the
sample.
"""
packed_results = dict()
if 'img' in results:
img = results['img']
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
            # To improve the computational speed by 3-5 times, apply:
# If image is not contiguous, use
# `numpy.transpose()` followed by `numpy.ascontiguousarray()`
# If image is already contiguous, use
# `torch.permute()` followed by `torch.contiguous()`
# Refer to https://github.com/open-mmlab/mmdetection/pull/9533
# for more details
if not img.flags.c_contiguous:
img = np.ascontiguousarray(img.transpose(2, 0, 1))
img = to_tensor(img)
else:
img = to_tensor(img).permute(2, 0, 1).contiguous()
packed_results['inputs'] = img
if 'gt_ignore_flags' in results:
valid_idx = np.where(results['gt_ignore_flags'] == 0)[0]
ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0]
data_sample = DetDataSample()
instance_data = InstanceData()
ignore_instance_data = InstanceData()
for key in self.mapping_table.keys():
if key not in results:
continue
if key == 'gt_masks' or isinstance(results[key], BaseBoxes):
if 'gt_ignore_flags' in results:
instance_data[
self.mapping_table[key]] = results[key][valid_idx]
ignore_instance_data[
self.mapping_table[key]] = results[key][ignore_idx]
else:
instance_data[self.mapping_table[key]] = results[key]
else:
if 'gt_ignore_flags' in results:
instance_data[self.mapping_table[key]] = to_tensor(
results[key][valid_idx])
ignore_instance_data[self.mapping_table[key]] = to_tensor(
results[key][ignore_idx])
else:
instance_data[self.mapping_table[key]] = to_tensor(
results[key])
data_sample.gt_instances = instance_data
data_sample.ignored_instances = ignore_instance_data
if 'proposals' in results:
proposals = InstanceData(
bboxes=to_tensor(results['proposals']),
scores=to_tensor(results['proposals_scores']))
data_sample.proposals = proposals
if 'gt_seg_map' in results:
gt_sem_seg_data = dict(
sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy()))
data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)
img_meta = {}
for key in self.meta_keys:
assert key in results, f'`{key}` is not found in `results`, ' \
f'the valid keys are {list(results)}.'
img_meta[key] = results[key]
data_sample.set_metainfo(img_meta)
packed_results['data_samples'] = data_sample
return packed_results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(meta_keys={self.meta_keys})'
return repr_str
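def _demo_pack_det_inputs():
    # Hedged usage sketch added for illustration only; it is not part of the
    # original module. It packs a random HWC image with no annotations and no
    # meta keys, just to show the expected output structure.
    img = np.zeros((32, 32, 3), dtype=np.uint8)
    packer = PackDetInputs(meta_keys=())
    packed = packer.transform(dict(img=img))
    # packed['inputs'] is a (3, 32, 32) tensor and packed['data_samples'] is a
    # DetDataSample with empty instance fields.
    return packed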
@TRANSFORMS.register_module()
class ToTensor:
"""Convert some results to :obj:`torch.Tensor` by given keys.
Args:
keys (Sequence[str]): Keys that need to be converted to Tensor.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function to convert data in results to :obj:`torch.Tensor`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted
to :obj:`torch.Tensor`.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@TRANSFORMS.register_module()
class ImageToTensor:
"""Convert image to :obj:`torch.Tensor` by given keys.
The dimension order of input image is (H, W, C). The pipeline will convert
it to (C, H, W). If only 2 dimension (H, W) is given, the output would be
(1, H, W).
Args:
keys (Sequence[str]): Key of images to be converted to Tensor.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function to convert image in results to :obj:`torch.Tensor` and
transpose the channel order.
Args:
results (dict): Result dict contains the image data to convert.
Returns:
dict: The result dict contains the image converted
to :obj:`torch.Tensor` and permuted to (C, H, W) order.
"""
for key in self.keys:
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
results[key] = to_tensor(img).permute(2, 0, 1).contiguous()
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@TRANSFORMS.register_module()
class Transpose:
"""Transpose some results by given keys.
Args:
keys (Sequence[str]): Keys of results to be transposed.
order (Sequence[int]): Order of transpose.
"""
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
"""Call function to transpose the channel order of data in results.
Args:
results (dict): Result dict contains the data to transpose.
Returns:
dict: The result dict contains the data transposed to \
``self.order``.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, order={self.order})'
@TRANSFORMS.register_module()
class WrapFieldsToLists:
"""Wrap fields of the data dictionary into lists for evaluation.
This class can be used as a last step of a test or validation
pipeline for single image evaluation or inference.
Example:
>>> test_pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
>>> dict(type='Pad', size_divisor=32),
>>> dict(type='ImageToTensor', keys=['img']),
>>> dict(type='Collect', keys=['img']),
>>> dict(type='WrapFieldsToLists')
>>> ]
"""
def __call__(self, results):
"""Call function to wrap fields into lists.
Args:
results (dict): Result dict contains the data to wrap.
Returns:
dict: The result dict where value of ``self.keys`` are wrapped \
into list.
"""
# Wrap dict fields into lists
for key, val in results.items():
results[key] = [val]
return results
def __repr__(self):
return f'{self.__class__.__name__}()'
| 9,565 | 32.80212 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/transforms/loading.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple, Union
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.transforms import BaseTransform
from mmcv.transforms import LoadAnnotations as MMCV_LoadAnnotations
from mmcv.transforms import LoadImageFromFile
from mmengine.fileio import get
from mmengine.structures import BaseDataElement
from mmdet.registry import TRANSFORMS
from mmdet.structures.bbox import get_box_type
from mmdet.structures.bbox.box_type import autocast_box_type
from mmdet.structures.mask import BitmapMasks, PolygonMasks
@TRANSFORMS.register_module()
class LoadImageFromNDArray(LoadImageFromFile):
"""Load an image from ``results['img']``.
Similar with :obj:`LoadImageFromFile`, but the image has been loaded as
:obj:`np.ndarray` in ``results['img']``. Can be used when loading image
from webcam.
Required Keys:
- img
Modified Keys:
- img
- img_path
- img_shape
- ori_shape
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is a uint8 array.
Defaults to False.
"""
def transform(self, results: dict) -> dict:
"""Transform function to add image meta information.
Args:
results (dict): Result dict with Webcam read image in
``results['img']``.
Returns:
dict: The dict contains loaded image and meta information.
"""
img = results['img']
if self.to_float32:
img = img.astype(np.float32)
results['img_path'] = None
results['img'] = img
results['img_shape'] = img.shape[:2]
results['ori_shape'] = img.shape[:2]
return results
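def _demo_load_image_from_ndarray():
    # Hedged sketch added for illustration only; it is not part of the original
    # module. LoadImageFromNDArray consumes an image that has already been
    # decoded into ``results['img']``, e.g. a webcam frame.
    img = np.zeros((16, 16, 3), dtype=np.uint8)
    results = LoadImageFromNDArray().transform(dict(img=img))
    # results['img_shape'] == (16, 16) and results['img_path'] is None
    return results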
@TRANSFORMS.register_module()
class LoadMultiChannelImageFromFiles(BaseTransform):
"""Load multi-channel images from a list of separate channel files.
Required Keys:
- img_path
Modified Keys:
- img
- img_shape
- ori_shape
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is a uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:``mmcv.imfrombytes``.
Defaults to 'unchanged'.
imdecode_backend (str): The image decoding backend type. The backend
argument for :func:``mmcv.imfrombytes``.
See :func:``mmcv.imfrombytes`` for details.
Defaults to 'cv2'.
file_client_args (dict): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet >= 3.0.0rc7. Defaults to None.
"""
def __init__(
self,
to_float32: bool = False,
color_type: str = 'unchanged',
imdecode_backend: str = 'cv2',
file_client_args: dict = None,
backend_args: dict = None,
) -> None:
self.to_float32 = to_float32
self.color_type = color_type
self.imdecode_backend = imdecode_backend
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead, please refer to'
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
def transform(self, results: dict) -> dict:
"""Transform functions to load multiple images and get images meta
information.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded images and meta information.
"""
assert isinstance(results['img_path'], list)
img = []
for name in results['img_path']:
img_bytes = get(name, backend_args=self.backend_args)
img.append(
mmcv.imfrombytes(
img_bytes,
flag=self.color_type,
backend=self.imdecode_backend))
img = np.stack(img, axis=-1)
if self.to_float32:
img = img.astype(np.float32)
results['img'] = img
results['img_shape'] = img.shape[:2]
results['ori_shape'] = img.shape[:2]
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'to_float32={self.to_float32}, '
f"color_type='{self.color_type}', "
f"imdecode_backend='{self.imdecode_backend}', "
f'backend_args={self.backend_args})')
return repr_str
@TRANSFORMS.register_module()
class LoadAnnotations(MMCV_LoadAnnotations):
"""Load and process the ``instances`` and ``seg_map`` annotation provided
by dataset.
The annotation format is as the following:
.. code-block:: python
{
'instances':
[
{
# List of 4 numbers representing the bounding box of the
# instance, in (x1, y1, x2, y2) order.
'bbox': [x1, y1, x2, y2],
# Label of image classification.
'bbox_label': 1,
# Used in instance/panoptic segmentation. The segmentation mask
# of the instance or the information of segments.
# 1. If list[list[float]], it represents a list of polygons,
# one for each connected component of the object. Each
# list[float] is one simple polygon in the format of
# [x1, y1, ..., xn, yn] (n≥3). The Xs and Ys are absolute
# coordinates in unit of pixels.
# 2. If dict, it represents the per-pixel segmentation mask in
# COCO’s compressed RLE format. The dict should have keys
# “size” and “counts”. Can be loaded by pycocotools
'mask': list[list[float]] or dict,
}
]
# Filename of semantic or panoptic segmentation ground truth file.
'seg_map_path': 'a/b/c'
}
After this module, the annotation has been changed to the format below:
.. code-block:: python
{
# In (x1, y1, x2, y2) order, float type. N is the number of bboxes
# in an image
'gt_bboxes': BaseBoxes(N, 4)
# In int type.
'gt_bboxes_labels': np.ndarray(N, )
# In built-in class
'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W)
# In uint8 type.
'gt_seg_map': np.ndarray (H, W)
# in (x, y, v) order, float type.
}
Required Keys:
- height
- width
- instances
- bbox (optional)
- bbox_label
- mask (optional)
- ignore_flag
- seg_map_path (optional)
Added Keys:
- gt_bboxes (BaseBoxes[torch.float32])
- gt_bboxes_labels (np.int64)
- gt_masks (BitmapMasks | PolygonMasks)
- gt_seg_map (np.uint8)
- gt_ignore_flags (bool)
Args:
with_bbox (bool): Whether to parse and load the bbox annotation.
Defaults to True.
with_label (bool): Whether to parse and load the label annotation.
Defaults to True.
with_mask (bool): Whether to parse and load the mask annotation.
Default: False.
with_seg (bool): Whether to parse and load the semantic segmentation
annotation. Defaults to False.
poly2mask (bool): Whether to convert mask to bitmap. Default: True.
box_type (str): The box type used to wrap the bboxes. If ``box_type``
is None, gt_bboxes will keep being np.ndarray. Defaults to 'hbox'.
imdecode_backend (str): The image decoding backend type. The backend
argument for :func:``mmcv.imfrombytes``.
            See :func:``mmcv.imfrombytes`` for details.
Defaults to 'cv2'.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
def __init__(self,
with_mask: bool = False,
poly2mask: bool = True,
box_type: str = 'hbox',
**kwargs) -> None:
super(LoadAnnotations, self).__init__(**kwargs)
self.with_mask = with_mask
self.poly2mask = poly2mask
self.box_type = box_type
def _load_bboxes(self, results: dict) -> None:
"""Private function to load bounding box annotations.
Args:
results (dict): Result dict from :obj:``mmengine.BaseDataset``.
Returns:
dict: The dict contains loaded bounding box annotations.
"""
gt_bboxes = []
gt_ignore_flags = []
for instance in results.get('instances', []):
gt_bboxes.append(instance['bbox'])
gt_ignore_flags.append(instance['ignore_flag'])
if self.box_type is None:
results['gt_bboxes'] = np.array(
gt_bboxes, dtype=np.float32).reshape((-1, 4))
else:
_, box_type_cls = get_box_type(self.box_type)
results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32)
results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)
def _load_labels(self, results: dict) -> None:
"""Private function to load label annotations.
Args:
results (dict): Result dict from :obj:``mmengine.BaseDataset``.
Returns:
dict: The dict contains loaded label annotations.
"""
gt_bboxes_labels = []
for instance in results.get('instances', []):
gt_bboxes_labels.append(instance['bbox_label'])
# TODO: Inconsistent with mmcv, consider how to deal with it later.
results['gt_bboxes_labels'] = np.array(
gt_bboxes_labels, dtype=np.int64)
def _poly2mask(self, mask_ann: Union[list, dict], img_h: int,
img_w: int) -> np.ndarray:
"""Private function to convert masks represented with polygon to
bitmaps.
Args:
mask_ann (list | dict): Polygon mask annotation input.
img_h (int): The height of output mask.
img_w (int): The width of output mask.
Returns:
np.ndarray: The decode bitmap mask of shape (img_h, img_w).
"""
if isinstance(mask_ann, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
def _process_masks(self, results: dict) -> list:
"""Process gt_masks and filter invalid polygons.
Args:
results (dict): Result dict from :obj:``mmengine.BaseDataset``.
Returns:
list: Processed gt_masks.
"""
gt_masks = []
gt_ignore_flags = []
for instance in results.get('instances', []):
gt_mask = instance['mask']
# If the annotation of segmentation mask is invalid,
# ignore the whole instance.
if isinstance(gt_mask, list):
gt_mask = [
np.array(polygon) for polygon in gt_mask
if len(polygon) % 2 == 0 and len(polygon) >= 6
]
if len(gt_mask) == 0:
# ignore this instance and set gt_mask to a fake mask
instance['ignore_flag'] = 1
gt_mask = [np.zeros(6)]
elif not self.poly2mask:
                # `PolygonMasks` requires a polygon of format List[np.array],
# other formats are invalid.
instance['ignore_flag'] = 1
gt_mask = [np.zeros(6)]
elif isinstance(gt_mask, dict) and \
not (gt_mask.get('counts') is not None and
gt_mask.get('size') is not None and
isinstance(gt_mask['counts'], (list, str))):
# if gt_mask is a dict, it should include `counts` and `size`,
                # so that `BitmapMasks` can uncompress the RLE
instance['ignore_flag'] = 1
gt_mask = [np.zeros(6)]
gt_masks.append(gt_mask)
# re-process gt_ignore_flags
gt_ignore_flags.append(instance['ignore_flag'])
results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)
return gt_masks
def _load_masks(self, results: dict) -> None:
"""Private function to load mask annotations.
Args:
results (dict): Result dict from :obj:``mmengine.BaseDataset``.
"""
h, w = results['ori_shape']
gt_masks = self._process_masks(results)
if self.poly2mask:
gt_masks = BitmapMasks(
[self._poly2mask(mask, h, w) for mask in gt_masks], h, w)
else:
# fake polygon masks will be ignored in `PackDetInputs`
gt_masks = PolygonMasks([mask for mask in gt_masks], h, w)
results['gt_masks'] = gt_masks
def transform(self, results: dict) -> dict:
"""Function to load multiple types annotations.
Args:
results (dict): Result dict from :obj:``mmengine.BaseDataset``.
Returns:
dict: The dict contains loaded bounding box, label and
semantic segmentation.
"""
if self.with_bbox:
self._load_bboxes(results)
if self.with_label:
self._load_labels(results)
if self.with_mask:
self._load_masks(results)
if self.with_seg:
self._load_seg_map(results)
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(with_bbox={self.with_bbox}, '
repr_str += f'with_label={self.with_label}, '
repr_str += f'with_mask={self.with_mask}, '
repr_str += f'with_seg={self.with_seg}, '
repr_str += f'poly2mask={self.poly2mask}, '
repr_str += f"imdecode_backend='{self.imdecode_backend}', "
repr_str += f'backend_args={self.backend_args})'
return repr_str
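def _demo_load_annotations():
    # Hedged sketch added for illustration only; it is not part of the original
    # module. A single-instance ``results`` dict in the format documented above
    # is run through LoadAnnotations with the default bbox/label options.
    loader = LoadAnnotations()
    results = dict(instances=[
        dict(bbox=[0.0, 0.0, 10.0, 10.0], bbox_label=1, ignore_flag=0)
    ])
    results = loader.transform(results)
    # results['gt_bboxes'] -> HorizontalBoxes with shape (1, 4)
    # results['gt_bboxes_labels'] -> array([1])
    # results['gt_ignore_flags'] -> array([False])
    return results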
@TRANSFORMS.register_module()
class LoadPanopticAnnotations(LoadAnnotations):
"""Load multiple types of panoptic annotations.
    The annotation format is as follows:
.. code-block:: python
{
'instances':
[
{
# List of 4 numbers representing the bounding box of the
# instance, in (x1, y1, x2, y2) order.
'bbox': [x1, y1, x2, y2],
# Label of image classification.
'bbox_label': 1,
},
...
]
'segments_info':
[
{
# id = cls_id + instance_id * INSTANCE_OFFSET
'id': int,
# Contiguous category id defined in dataset.
'category': int
# Thing flag.
'is_thing': bool
},
...
]
# Filename of semantic or panoptic segmentation ground truth file.
'seg_map_path': 'a/b/c'
}
After this module, the annotation has been changed to the format below:
.. code-block:: python
{
# In (x1, y1, x2, y2) order, float type. N is the number of bboxes
# in an image
'gt_bboxes': BaseBoxes(N, 4)
# In int type.
'gt_bboxes_labels': np.ndarray(N, )
# In built-in class
'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W)
# In uint8 type.
'gt_seg_map': np.ndarray (H, W)
# in (x, y, v) order, float type.
}
Required Keys:
- height
- width
- instances
- bbox
- bbox_label
- ignore_flag
- segments_info
- id
- category
- is_thing
- seg_map_path
Added Keys:
- gt_bboxes (BaseBoxes[torch.float32])
- gt_bboxes_labels (np.int64)
- gt_masks (BitmapMasks | PolygonMasks)
- gt_seg_map (np.uint8)
- gt_ignore_flags (bool)
Args:
with_bbox (bool): Whether to parse and load the bbox annotation.
Defaults to True.
with_label (bool): Whether to parse and load the label annotation.
Defaults to True.
with_mask (bool): Whether to parse and load the mask annotation.
Defaults to True.
with_seg (bool): Whether to parse and load the semantic segmentation
annotation. Defaults to False.
box_type (str): The box mode used to wrap the bboxes.
imdecode_backend (str): The image decoding backend type. The backend
argument for :func:``mmcv.imfrombytes``.
            See :func:``mmcv.imfrombytes`` for details.
Defaults to 'cv2'.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet >= 3.0.0rc7. Defaults to None.
"""
def __init__(self,
with_bbox: bool = True,
with_label: bool = True,
with_mask: bool = True,
with_seg: bool = True,
box_type: str = 'hbox',
imdecode_backend: str = 'cv2',
backend_args: dict = None) -> None:
try:
from panopticapi import utils
except ImportError:
raise ImportError(
'panopticapi is not installed, please install it by: '
'pip install git+https://github.com/cocodataset/'
'panopticapi.git.')
self.rgb2id = utils.rgb2id
super(LoadPanopticAnnotations, self).__init__(
with_bbox=with_bbox,
with_label=with_label,
with_mask=with_mask,
with_seg=with_seg,
with_keypoints=False,
box_type=box_type,
imdecode_backend=imdecode_backend,
backend_args=backend_args)
def _load_masks_and_semantic_segs(self, results: dict) -> None:
"""Private function to load mask and semantic segmentation annotations.
In gt_semantic_seg, the foreground label is from ``0`` to
``num_things - 1``, the background label is from ``num_things`` to
``num_things + num_stuff - 1``, 255 means the ignored label (``VOID``).
Args:
results (dict): Result dict from :obj:``mmdet.CustomDataset``.
"""
        # seg_map_path is None when running inference without gt annotations.
if results.get('seg_map_path', None) is None:
return
img_bytes = get(
results['seg_map_path'], backend_args=self.backend_args)
pan_png = mmcv.imfrombytes(
img_bytes, flag='color', channel_order='rgb').squeeze()
pan_png = self.rgb2id(pan_png)
gt_masks = []
gt_seg = np.zeros_like(pan_png) + 255 # 255 as ignore
for segment_info in results['segments_info']:
mask = (pan_png == segment_info['id'])
gt_seg = np.where(mask, segment_info['category'], gt_seg)
# The legal thing masks
if segment_info.get('is_thing'):
gt_masks.append(mask.astype(np.uint8))
if self.with_mask:
h, w = results['ori_shape']
gt_masks = BitmapMasks(gt_masks, h, w)
results['gt_masks'] = gt_masks
if self.with_seg:
results['gt_seg_map'] = gt_seg
def transform(self, results: dict) -> dict:
"""Function to load multiple types panoptic annotations.
Args:
results (dict): Result dict from :obj:``mmdet.CustomDataset``.
Returns:
dict: The dict contains loaded bounding box, label, mask and
semantic segmentation annotations.
"""
if self.with_bbox:
self._load_bboxes(results)
if self.with_label:
self._load_labels(results)
if self.with_mask or self.with_seg:
# The tasks completed by '_load_masks' and '_load_semantic_segs'
# in LoadAnnotations are merged to one function.
self._load_masks_and_semantic_segs(results)
return results
@TRANSFORMS.register_module()
class LoadProposals(BaseTransform):
"""Load proposal pipeline.
Required Keys:
- proposals
Modified Keys:
- proposals
Args:
num_max_proposals (int, optional): Maximum number of proposals to load.
If not specified, all proposals will be loaded.
"""
def __init__(self, num_max_proposals: Optional[int] = None) -> None:
self.num_max_proposals = num_max_proposals
def transform(self, results: dict) -> dict:
"""Transform function to load proposals from file.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded proposal annotations.
"""
proposals = results['proposals']
# the type of proposals should be `dict` or `InstanceData`
assert isinstance(proposals, dict) \
or isinstance(proposals, BaseDataElement)
bboxes = proposals['bboxes'].astype(np.float32)
assert bboxes.shape[1] == 4, \
f'Proposals should have shapes (n, 4), but found {bboxes.shape}'
if 'scores' in proposals:
scores = proposals['scores'].astype(np.float32)
assert bboxes.shape[0] == scores.shape[0]
else:
scores = np.zeros(bboxes.shape[0], dtype=np.float32)
if self.num_max_proposals is not None:
            # proposals should be sorted by scores when they are dumped
bboxes = bboxes[:self.num_max_proposals]
scores = scores[:self.num_max_proposals]
if len(bboxes) == 0:
bboxes = np.zeros((0, 4), dtype=np.float32)
scores = np.zeros(0, dtype=np.float32)
results['proposals'] = bboxes
results['proposals_scores'] = scores
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(num_max_proposals={self.num_max_proposals})'
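def _demo_load_proposals():
    # Hedged sketch added for illustration only; it is not part of the original
    # module. Pre-computed proposals are fed in the expected dict format and
    # only the highest-scoring box is kept.
    proposals = dict(
        bboxes=np.array([[0, 0, 10, 10], [5, 5, 20, 20]], dtype=np.float32),
        scores=np.array([0.9, 0.4], dtype=np.float32))
    results = LoadProposals(num_max_proposals=1).transform(
        dict(proposals=proposals))
    # results['proposals'] -> (1, 4) float32 array
    # results['proposals_scores'] -> (1,) float32 array
    return results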
@TRANSFORMS.register_module()
class FilterAnnotations(BaseTransform):
"""Filter invalid annotations.
Required Keys:
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_ignore_flags (bool) (optional)
Modified Keys:
- gt_bboxes (optional)
- gt_bboxes_labels (optional)
- gt_masks (optional)
- gt_ignore_flags (optional)
Args:
min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth
boxes. Default: (1., 1.)
min_gt_mask_area (int): Minimum foreground area of ground truth masks.
Default: 1
by_box (bool): Filter instances with bounding boxes not meeting the
min_gt_bbox_wh threshold. Default: True
by_mask (bool): Filter instances with masks not meeting
min_gt_mask_area threshold. Default: False
        keep_empty (bool): Whether to return None when no valid bbox
            remains after filtering. Defaults to True.
"""
def __init__(self,
min_gt_bbox_wh: Tuple[int, int] = (1, 1),
min_gt_mask_area: int = 1,
by_box: bool = True,
by_mask: bool = False,
keep_empty: bool = True) -> None:
# TODO: add more filter options
assert by_box or by_mask
self.min_gt_bbox_wh = min_gt_bbox_wh
self.min_gt_mask_area = min_gt_mask_area
self.by_box = by_box
self.by_mask = by_mask
self.keep_empty = keep_empty
@autocast_box_type()
def transform(self, results: dict) -> Union[dict, None]:
"""Transform function to filter annotations.
Args:
results (dict): Result dict.
Returns:
dict: Updated result dict.
"""
assert 'gt_bboxes' in results
gt_bboxes = results['gt_bboxes']
if gt_bboxes.shape[0] == 0:
return results
tests = []
if self.by_box:
tests.append(
((gt_bboxes.widths > self.min_gt_bbox_wh[0]) &
(gt_bboxes.heights > self.min_gt_bbox_wh[1])).numpy())
if self.by_mask:
assert 'gt_masks' in results
gt_masks = results['gt_masks']
tests.append(gt_masks.areas >= self.min_gt_mask_area)
keep = tests[0]
for t in tests[1:]:
keep = keep & t
if not keep.any():
if self.keep_empty:
return None
keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags')
for key in keys:
if key in results:
results[key] = results[key][keep]
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(min_gt_bbox_wh={self.min_gt_bbox_wh}, ' \
f'keep_empty={self.keep_empty})'
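def _demo_filter_annotations():
    # Hedged sketch added for illustration only; it is not part of the original
    # module. Two boxes are filtered with the default 1x1 threshold; the second
    # box is smaller than one pixel in both dimensions and is dropped.
    from mmdet.structures.bbox import HorizontalBoxes
    results = dict(
        gt_bboxes=HorizontalBoxes(
            np.array([[0, 0, 10, 10], [5, 5, 5.5, 5.5]], dtype=np.float32)),
        gt_bboxes_labels=np.array([0, 1], dtype=np.int64),
        gt_ignore_flags=np.array([False, False]))
    results = FilterAnnotations(keep_empty=False).transform(results)
    # only the first box survives: results['gt_bboxes'] has shape (1, 4)
    return results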
@TRANSFORMS.register_module()
class LoadEmptyAnnotations(BaseTransform):
"""Load Empty Annotations for unlabeled images.
Added Keys:
- gt_bboxes (np.float32)
- gt_bboxes_labels (np.int64)
- gt_masks (BitmapMasks | PolygonMasks)
- gt_seg_map (np.uint8)
- gt_ignore_flags (bool)
Args:
with_bbox (bool): Whether to load the pseudo bbox annotation.
Defaults to True.
with_label (bool): Whether to load the pseudo label annotation.
Defaults to True.
with_mask (bool): Whether to load the pseudo mask annotation.
Default: False.
with_seg (bool): Whether to load the pseudo semantic segmentation
annotation. Defaults to False.
seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
of the corresponding config. Defaults to 255.
"""
def __init__(self,
with_bbox: bool = True,
with_label: bool = True,
with_mask: bool = False,
with_seg: bool = False,
seg_ignore_label: int = 255) -> None:
self.with_bbox = with_bbox
self.with_label = with_label
self.with_mask = with_mask
self.with_seg = with_seg
self.seg_ignore_label = seg_ignore_label
def transform(self, results: dict) -> dict:
"""Transform function to load empty annotations.
Args:
results (dict): Result dict.
Returns:
dict: Updated result dict.
"""
if self.with_bbox:
results['gt_bboxes'] = np.zeros((0, 4), dtype=np.float32)
results['gt_ignore_flags'] = np.zeros((0, ), dtype=bool)
if self.with_label:
results['gt_bboxes_labels'] = np.zeros((0, ), dtype=np.int64)
if self.with_mask:
# TODO: support PolygonMasks
h, w = results['img_shape']
gt_masks = np.zeros((0, h, w), dtype=np.uint8)
results['gt_masks'] = BitmapMasks(gt_masks, h, w)
if self.with_seg:
h, w = results['img_shape']
results['gt_seg_map'] = self.seg_ignore_label * np.ones(
(h, w), dtype=np.uint8)
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(with_bbox={self.with_bbox}, '
repr_str += f'with_label={self.with_label}, '
repr_str += f'with_mask={self.with_mask}, '
repr_str += f'with_seg={self.with_seg}, '
repr_str += f'seg_ignore_label={self.seg_ignore_label})'
return repr_str
@TRANSFORMS.register_module()
class InferencerLoader(BaseTransform):
"""Load an image from ``results['img']``.
Similar with :obj:`LoadImageFromFile`, but the image has been loaded as
:obj:`np.ndarray` in ``results['img']``. Can be used when loading image
from webcam.
Required Keys:
- img
Modified Keys:
- img
- img_path
- img_shape
- ori_shape
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is a uint8 array.
Defaults to False.
"""
def __init__(self, **kwargs) -> None:
super().__init__()
self.from_file = TRANSFORMS.build(
dict(type='LoadImageFromFile', **kwargs))
self.from_ndarray = TRANSFORMS.build(
dict(type='mmdet.LoadImageFromNDArray', **kwargs))
def transform(self, results: Union[str, np.ndarray, dict]) -> dict:
"""Transform function to add image meta information.
Args:
results (str, np.ndarray or dict): The result.
Returns:
dict: The dict contains loaded image and meta information.
"""
if isinstance(results, str):
inputs = dict(img_path=results)
elif isinstance(results, np.ndarray):
inputs = dict(img=results)
elif isinstance(results, dict):
inputs = results
else:
raise NotImplementedError
if 'img' in inputs:
return self.from_ndarray(inputs)
return self.from_file(inputs)
| 30,139 | 33.25 | 125 |
py
|
ERD
|
ERD-main/mmdet/datasets/transforms/instaboost.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import numpy as np
from mmcv.transforms import BaseTransform
from mmdet.registry import TRANSFORMS
@TRANSFORMS.register_module()
class InstaBoost(BaseTransform):
r"""Data augmentation method in `InstaBoost: Boosting Instance
Segmentation Via Probability Map Guided Copy-Pasting
<https://arxiv.org/abs/1908.07801>`_.
Refer to https://github.com/GothicAi/Instaboost for implementation details.
Required Keys:
- img (np.uint8)
- instances
Modified Keys:
- img (np.uint8)
- instances
Args:
action_candidate (tuple): Action candidates. "normal", "horizontal", \
"vertical", "skip" are supported. Defaults to ('normal', \
'horizontal', 'skip').
action_prob (tuple): Corresponding action probabilities. Should be \
the same length as action_candidate. Defaults to (1, 0, 0).
scale (tuple): (min scale, max scale). Defaults to (0.8, 1.2).
dx (int): The maximum x-axis shift will be (instance width) / dx.
Defaults to 15.
dy (int): The maximum y-axis shift will be (instance height) / dy.
Defaults to 15.
theta (tuple): (min rotation degree, max rotation degree). \
Defaults to (-1, 1).
color_prob (float): Probability of images for color augmentation.
Defaults to 0.5.
hflag (bool): Whether to use heatmap guided. Defaults to False.
aug_ratio (float): Probability of applying this transformation. \
Defaults to 0.5.
"""
def __init__(self,
action_candidate: tuple = ('normal', 'horizontal', 'skip'),
action_prob: tuple = (1, 0, 0),
scale: tuple = (0.8, 1.2),
dx: int = 15,
dy: int = 15,
theta: tuple = (-1, 1),
color_prob: float = 0.5,
hflag: bool = False,
aug_ratio: float = 0.5) -> None:
import matplotlib
import matplotlib.pyplot as plt
default_backend = plt.get_backend()
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError(
'Please run "pip install instaboostfast" '
'to install instaboostfast first for instaboost augmentation.')
# instaboost will modify the default backend
# and cause visualization to fail.
matplotlib.use(default_backend)
self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
scale, dx, dy, theta,
color_prob, hflag)
self.aug_ratio = aug_ratio
def _load_anns(self, results: dict) -> Tuple[list, list]:
"""Convert raw anns to instaboost expected input format."""
anns = []
ignore_anns = []
for instance in results['instances']:
label = instance['bbox_label']
bbox = instance['bbox']
mask = instance['mask']
x1, y1, x2, y2 = bbox
# assert (x2 - x1) >= 1 and (y2 - y1) >= 1
bbox = [x1, y1, x2 - x1, y2 - y1]
if instance['ignore_flag'] == 0:
anns.append({
'category_id': label,
'segmentation': mask,
'bbox': bbox
})
else:
# Ignore instances without data augmentation
ignore_anns.append(instance)
return anns, ignore_anns
def _parse_anns(self, results: dict, anns: list, ignore_anns: list,
img: np.ndarray) -> dict:
"""Restore the result of instaboost processing to the original anns
format."""
instances = []
for ann in anns:
x1, y1, w, h = ann['bbox']
            # TODO: a more fundamental bug needs to be fixed in instaboost
if w <= 0 or h <= 0:
continue
bbox = [x1, y1, x1 + w, y1 + h]
instances.append(
dict(
bbox=bbox,
bbox_label=ann['category_id'],
mask=ann['segmentation'],
ignore_flag=0))
instances.extend(ignore_anns)
results['img'] = img
results['instances'] = instances
return results
def transform(self, results) -> dict:
"""The transform function."""
img = results['img']
ori_type = img.dtype
if 'instances' not in results or len(results['instances']) == 0:
return results
anns, ignore_anns = self._load_anns(results)
if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError('Please run "pip install instaboostfast" '
'to install instaboostfast first.')
anns, img = instaboost.get_new_data(
anns, img.astype(np.uint8), self.cfg, background=None)
results = self._parse_anns(results, anns, ignore_anns,
img.astype(ori_type))
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(aug_ratio={self.aug_ratio})'
return repr_str
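def _demo_bbox_xyxy_xywh_roundtrip():
    # Hedged sketch added for illustration only; it is not part of the original
    # transform. It shows the (x1, y1, x2, y2) -> (x, y, w, h) conversion done
    # in ``_load_anns`` and its inverse in ``_parse_anns``.
    x1, y1, x2, y2 = 10.0, 20.0, 50.0, 80.0
    xywh = [x1, y1, x2 - x1, y2 - y1]  # [10.0, 20.0, 40.0, 60.0]
    x, y, w, h = xywh
    xyxy = [x, y, x + w, y + h]  # back to [10.0, 20.0, 50.0, 80.0]
    return xywh, xyxy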
| 5,489 | 35.357616 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/transforms/geometric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
import cv2
import mmcv
import numpy as np
from mmcv.transforms import BaseTransform
from mmcv.transforms.utils import cache_randomness
from mmdet.registry import TRANSFORMS
from mmdet.structures.bbox import autocast_box_type
from .augment_wrappers import _MAX_LEVEL, level_to_mag
@TRANSFORMS.register_module()
class GeomTransform(BaseTransform):
"""Base class for geometric transformations. All geometric transformations
need to inherit from this base class. ``GeomTransform`` unifies the class
attributes and class functions of geometric transformations (ShearX,
ShearY, Rotate, TranslateX, and TranslateY), and records the homography
matrix.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- gt_bboxes
- gt_masks
- gt_seg_map
Added Keys:
- homography_matrix
Args:
prob (float): The probability for performing the geometric
transformation and should be in range [0, 1]. Defaults to 1.0.
level (int, optional): The level should be in range [0, _MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum magnitude for geometric transformation.
Defaults to 0.0.
max_mag (float): The maximum magnitude for geometric transformation.
Defaults to 1.0.
reversal_prob (float): The probability that reverses the geometric
transformation magnitude. Should be in range [0,1].
Defaults to 0.5.
img_border_value (int | float | tuple): The filled values for
image border. If float, the same fill value will be used for
all the three channels of image. If tuple, it should be 3 elements.
Defaults to 128.
mask_border_value (int): The fill value used for masks. Defaults to 0.
seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
of the corresponding config. Defaults to 255.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear" for 'pillow' backend. Defaults
to 'bilinear'.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.0,
max_mag: float = 1.0,
reversal_prob: float = 0.5,
img_border_value: Union[int, float, tuple] = 128,
mask_border_value: int = 0,
seg_ignore_label: int = 255,
interpolation: str = 'bilinear') -> None:
assert 0 <= prob <= 1.0, f'The probability of the transformation ' \
f'should be in range [0,1], got {prob}.'
assert level is None or isinstance(level, int), \
f'The level should be None or type int, got {type(level)}.'
assert level is None or 0 <= level <= _MAX_LEVEL, \
f'The level should be in range [0,{_MAX_LEVEL}], got {level}.'
assert isinstance(min_mag, float), \
f'min_mag should be type float, got {type(min_mag)}.'
assert isinstance(max_mag, float), \
f'max_mag should be type float, got {type(max_mag)}.'
assert min_mag <= max_mag, \
            f'min_mag should not be greater than max_mag, ' \
f'got min_mag={min_mag} and max_mag={max_mag}'
assert isinstance(reversal_prob, float), \
            f'reversal_prob should be type float, got {type(reversal_prob)}.'
assert 0 <= reversal_prob <= 1.0, \
            f'The reversal probability of the transformation magnitude ' \
            f'should be in range [0,1], got {reversal_prob}.'
if isinstance(img_border_value, (float, int)):
img_border_value = tuple([float(img_border_value)] * 3)
elif isinstance(img_border_value, tuple):
assert len(img_border_value) == 3, \
f'img_border_value as tuple must have 3 elements, ' \
f'got {len(img_border_value)}.'
img_border_value = tuple([float(val) for val in img_border_value])
else:
raise ValueError(
'img_border_value must be float or tuple with 3 elements.')
assert np.all([0 <= val <= 255 for val in img_border_value]), 'all ' \
            'elements of img_border_value should be in range [0,255], ' \
f'got {img_border_value}.'
self.prob = prob
self.level = level
self.min_mag = min_mag
self.max_mag = max_mag
self.reversal_prob = reversal_prob
self.img_border_value = img_border_value
self.mask_border_value = mask_border_value
self.seg_ignore_label = seg_ignore_label
self.interpolation = interpolation
def _transform_img(self, results: dict, mag: float) -> None:
"""Transform the image."""
pass
def _transform_masks(self, results: dict, mag: float) -> None:
"""Transform the masks."""
pass
def _transform_seg(self, results: dict, mag: float) -> None:
"""Transform the segmentation map."""
pass
def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:
"""Get the homography matrix for the geometric transformation."""
return np.eye(3, dtype=np.float32)
def _transform_bboxes(self, results: dict, mag: float) -> None:
"""Transform the bboxes."""
results['gt_bboxes'].project_(self.homography_matrix)
results['gt_bboxes'].clip_(results['img_shape'])
def _record_homography_matrix(self, results: dict) -> None:
"""Record the homography matrix for the geometric transformation."""
if results.get('homography_matrix', None) is None:
results['homography_matrix'] = self.homography_matrix
else:
results['homography_matrix'] = self.homography_matrix @ results[
'homography_matrix']
@cache_randomness
def _random_disable(self):
"""Randomly disable the transform."""
return np.random.rand() > self.prob
@cache_randomness
def _get_mag(self):
"""Get the magnitude of the transform."""
mag = level_to_mag(self.level, self.min_mag, self.max_mag)
return -mag if np.random.rand() > self.reversal_prob else mag
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Transform function for images, bounding boxes, masks and semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Transformed results.
"""
if self._random_disable():
return results
mag = self._get_mag()
self.homography_matrix = self._get_homography_matrix(results, mag)
self._record_homography_matrix(results)
self._transform_img(results, mag)
if results.get('gt_bboxes', None) is not None:
self._transform_bboxes(results, mag)
if results.get('gt_masks', None) is not None:
self._transform_masks(results, mag)
if results.get('gt_seg_map', None) is not None:
self._transform_seg(results, mag)
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob}, '
repr_str += f'level={self.level}, '
repr_str += f'min_mag={self.min_mag}, '
repr_str += f'max_mag={self.max_mag}, '
repr_str += f'reversal_prob={self.reversal_prob}, '
repr_str += f'img_border_value={self.img_border_value}, '
repr_str += f'mask_border_value={self.mask_border_value}, '
repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
repr_str += f'interpolation={self.interpolation})'
return repr_str
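def _demo_homography_composition():
    # Hedged sketch added for illustration only; it is not part of the original
    # transform. Successive geometric transforms accumulate by left matrix
    # multiplication, mirroring ``_record_homography_matrix``.
    translate = np.array([[1, 0, 5], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
    shear = np.array([[1, 0.5, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
    combined = shear @ translate  # translate applied first, shear second
    point = np.array([0.0, 2.0, 1.0], dtype=np.float32)
    return combined @ point  # array([6., 2., 1.], dtype=float32)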
@TRANSFORMS.register_module()
class ShearX(GeomTransform):
"""Shear the images, bboxes, masks and segmentation map horizontally.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- gt_bboxes
- gt_masks
- gt_seg_map
Added Keys:
- homography_matrix
Args:
prob (float): The probability for performing Shear and should be in
range [0, 1]. Defaults to 1.0.
level (int, optional): The level should be in range [0, _MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum angle for the horizontal shear.
Defaults to 0.0.
max_mag (float): The maximum angle for the horizontal shear.
Defaults to 30.0.
reversal_prob (float): The probability that reverses the horizontal
shear magnitude. Should be in range [0,1]. Defaults to 0.5.
img_border_value (int | float | tuple): The filled values for
image border. If float, the same fill value will be used for
all the three channels of image. If tuple, it should be 3 elements.
Defaults to 128.
mask_border_value (int): The fill value used for masks. Defaults to 0.
seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
of the corresponding config. Defaults to 255.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear" for 'pillow' backend. Defaults
to 'bilinear'.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.0,
max_mag: float = 30.0,
reversal_prob: float = 0.5,
img_border_value: Union[int, float, tuple] = 128,
mask_border_value: int = 0,
seg_ignore_label: int = 255,
interpolation: str = 'bilinear') -> None:
assert 0. <= min_mag <= 90., \
f'min_mag angle for ShearX should be ' \
f'in range [0, 90], got {min_mag}.'
assert 0. <= max_mag <= 90., \
f'max_mag angle for ShearX should be ' \
f'in range [0, 90], got {max_mag}.'
super().__init__(
prob=prob,
level=level,
min_mag=min_mag,
max_mag=max_mag,
reversal_prob=reversal_prob,
img_border_value=img_border_value,
mask_border_value=mask_border_value,
seg_ignore_label=seg_ignore_label,
interpolation=interpolation)
@cache_randomness
def _get_mag(self):
"""Get the magnitude of the transform."""
mag = level_to_mag(self.level, self.min_mag, self.max_mag)
mag = np.tan(mag * np.pi / 180)
return -mag if np.random.rand() > self.reversal_prob else mag
def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:
"""Get the homography matrix for ShearX."""
return np.array([[1, mag, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
def _transform_img(self, results: dict, mag: float) -> None:
"""Shear the image horizontally."""
results['img'] = mmcv.imshear(
results['img'],
mag,
direction='horizontal',
border_value=self.img_border_value,
interpolation=self.interpolation)
def _transform_masks(self, results: dict, mag: float) -> None:
"""Shear the masks horizontally."""
results['gt_masks'] = results['gt_masks'].shear(
results['img_shape'],
mag,
direction='horizontal',
border_value=self.mask_border_value,
interpolation=self.interpolation)
def _transform_seg(self, results: dict, mag: float) -> None:
"""Shear the segmentation map horizontally."""
results['gt_seg_map'] = mmcv.imshear(
results['gt_seg_map'],
mag,
direction='horizontal',
border_value=self.seg_ignore_label,
interpolation='nearest')
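def _demo_shear_angle_to_mag():
    # Hedged sketch added for illustration only; it is not part of the original
    # transform. ShearX/ShearY sample an angle in degrees and convert it to the
    # shear factor passed to ``mmcv.imshear`` via the tangent.
    angle = 30.0
    mag = np.tan(angle * np.pi / 180)  # ~0.577, i.e. x' = x + 0.577 * y
    return mag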
@TRANSFORMS.register_module()
class ShearY(GeomTransform):
"""Shear the images, bboxes, masks and segmentation map vertically.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- gt_bboxes
- gt_masks
- gt_seg_map
Added Keys:
- homography_matrix
Args:
prob (float): The probability for performing ShearY and should be in
range [0, 1]. Defaults to 1.0.
level (int, optional): The level should be in range [0,_MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum angle for the vertical shear.
Defaults to 0.0.
max_mag (float): The maximum angle for the vertical shear.
Defaults to 30.0.
reversal_prob (float): The probability that reverses the vertical
shear magnitude. Should be in range [0,1]. Defaults to 0.5.
img_border_value (int | float | tuple): The filled values for
image border. If float, the same fill value will be used for
all the three channels of image. If tuple, it should be 3 elements.
Defaults to 128.
mask_border_value (int): The fill value used for masks. Defaults to 0.
seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
of the corresponding config. Defaults to 255.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear" for 'pillow' backend. Defaults
to 'bilinear'.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.0,
max_mag: float = 30.,
reversal_prob: float = 0.5,
img_border_value: Union[int, float, tuple] = 128,
mask_border_value: int = 0,
seg_ignore_label: int = 255,
interpolation: str = 'bilinear') -> None:
assert 0. <= min_mag <= 90., \
f'min_mag angle for ShearY should be ' \
f'in range [0, 90], got {min_mag}.'
assert 0. <= max_mag <= 90., \
f'max_mag angle for ShearY should be ' \
f'in range [0, 90], got {max_mag}.'
super().__init__(
prob=prob,
level=level,
min_mag=min_mag,
max_mag=max_mag,
reversal_prob=reversal_prob,
img_border_value=img_border_value,
mask_border_value=mask_border_value,
seg_ignore_label=seg_ignore_label,
interpolation=interpolation)
@cache_randomness
def _get_mag(self):
"""Get the magnitude of the transform."""
mag = level_to_mag(self.level, self.min_mag, self.max_mag)
mag = np.tan(mag * np.pi / 180)
return -mag if np.random.rand() > self.reversal_prob else mag
def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:
"""Get the homography matrix for ShearY."""
return np.array([[1, 0, 0], [mag, 1, 0], [0, 0, 1]], dtype=np.float32)
def _transform_img(self, results: dict, mag: float) -> None:
"""Shear the image vertically."""
results['img'] = mmcv.imshear(
results['img'],
mag,
direction='vertical',
border_value=self.img_border_value,
interpolation=self.interpolation)
def _transform_masks(self, results: dict, mag: float) -> None:
"""Shear the masks vertically."""
results['gt_masks'] = results['gt_masks'].shear(
results['img_shape'],
mag,
direction='vertical',
border_value=self.mask_border_value,
interpolation=self.interpolation)
def _transform_seg(self, results: dict, mag: float) -> None:
"""Shear the segmentation map vertically."""
results['gt_seg_map'] = mmcv.imshear(
results['gt_seg_map'],
mag,
direction='vertical',
border_value=self.seg_ignore_label,
interpolation='nearest')
@TRANSFORMS.register_module()
class Rotate(GeomTransform):
"""Rotate the images, bboxes, masks and segmentation map.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- gt_bboxes
- gt_masks
- gt_seg_map
Added Keys:
- homography_matrix
Args:
        prob (float): The probability of performing the transformation,
            which should be in range [0, 1]. Defaults to 1.0.
level (int, optional): The level should be in range [0, _MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
        min_mag (float): The minimum angle for rotation.
Defaults to 0.0.
max_mag (float): The maximum angle for rotation.
Defaults to 30.0.
reversal_prob (float): The probability that reverses the rotation
magnitude. Should be in range [0,1]. Defaults to 0.5.
img_border_value (int | float | tuple): The filled values for
image border. If float, the same fill value will be used for
all the three channels of image. If tuple, it should be 3 elements.
Defaults to 128.
mask_border_value (int): The fill value used for masks. Defaults to 0.
seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
of the corresponding config. Defaults to 255.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear" for 'pillow' backend. Defaults
to 'bilinear'.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.0,
max_mag: float = 30.0,
reversal_prob: float = 0.5,
img_border_value: Union[int, float, tuple] = 128,
mask_border_value: int = 0,
seg_ignore_label: int = 255,
interpolation: str = 'bilinear') -> None:
assert 0. <= min_mag <= 180., \
f'min_mag for Rotate should be in range [0,180], got {min_mag}.'
assert 0. <= max_mag <= 180., \
f'max_mag for Rotate should be in range [0,180], got {max_mag}.'
super().__init__(
prob=prob,
level=level,
min_mag=min_mag,
max_mag=max_mag,
reversal_prob=reversal_prob,
img_border_value=img_border_value,
mask_border_value=mask_border_value,
seg_ignore_label=seg_ignore_label,
interpolation=interpolation)
def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:
"""Get the homography matrix for Rotate."""
img_shape = results['img_shape']
center = ((img_shape[1] - 1) * 0.5, (img_shape[0] - 1) * 0.5)
cv2_rotation_matrix = cv2.getRotationMatrix2D(center, -mag, 1.0)
return np.concatenate(
[cv2_rotation_matrix,
np.array([0, 0, 1]).reshape((1, 3))]).astype(np.float32)
def _transform_img(self, results: dict, mag: float) -> None:
"""Rotate the image."""
results['img'] = mmcv.imrotate(
results['img'],
mag,
border_value=self.img_border_value,
interpolation=self.interpolation)
def _transform_masks(self, results: dict, mag: float) -> None:
"""Rotate the masks."""
results['gt_masks'] = results['gt_masks'].rotate(
results['img_shape'],
mag,
border_value=self.mask_border_value,
interpolation=self.interpolation)
def _transform_seg(self, results: dict, mag: float) -> None:
"""Rotate the segmentation map."""
results['gt_seg_map'] = mmcv.imrotate(
results['gt_seg_map'],
mag,
border_value=self.seg_ignore_label,
interpolation='nearest')
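def _demo_rotate_homography():
    # Hedged sketch added for illustration only; it is not part of the original
    # transform. The 2x3 OpenCV rotation matrix about the image centre is
    # padded to a 3x3 homography, mirroring ``Rotate._get_homography_matrix``
    # (note the sign flip on the angle). The image shape is hypothetical.
    img_shape = (100, 200)  # (h, w)
    center = ((img_shape[1] - 1) * 0.5, (img_shape[0] - 1) * 0.5)
    cv2_rotation_matrix = cv2.getRotationMatrix2D(center, -30.0, 1.0)
    return np.concatenate(
        [cv2_rotation_matrix,
         np.array([0, 0, 1]).reshape((1, 3))]).astype(np.float32)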
@TRANSFORMS.register_module()
class TranslateX(GeomTransform):
"""Translate the images, bboxes, masks and segmentation map horizontally.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- gt_bboxes
- gt_masks
- gt_seg_map
Added Keys:
- homography_matrix
Args:
        prob (float): The probability of performing the transformation,
            which should be in range [0, 1]. Defaults to 1.0.
level (int, optional): The level should be in range [0, _MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
        min_mag (float): The minimum horizontal translation offset, expressed
            as a ratio of the image width. Defaults to 0.0.
        max_mag (float): The maximum horizontal translation offset, expressed
            as a ratio of the image width. Defaults to 0.1.
reversal_prob (float): The probability that reverses the horizontal
translation magnitude. Should be in range [0,1]. Defaults to 0.5.
img_border_value (int | float | tuple): The filled values for
image border. If float, the same fill value will be used for
all the three channels of image. If tuple, it should be 3 elements.
Defaults to 128.
mask_border_value (int): The fill value used for masks. Defaults to 0.
        seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
            of the corresponding config. Defaults to 255.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear" for 'pillow' backend. Defaults
to 'bilinear'.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.0,
max_mag: float = 0.1,
reversal_prob: float = 0.5,
img_border_value: Union[int, float, tuple] = 128,
mask_border_value: int = 0,
seg_ignore_label: int = 255,
interpolation: str = 'bilinear') -> None:
assert 0. <= min_mag <= 1., \
f'min_mag ratio for TranslateX should be ' \
f'in range [0, 1], got {min_mag}.'
assert 0. <= max_mag <= 1., \
f'max_mag ratio for TranslateX should be ' \
f'in range [0, 1], got {max_mag}.'
super().__init__(
prob=prob,
level=level,
min_mag=min_mag,
max_mag=max_mag,
reversal_prob=reversal_prob,
img_border_value=img_border_value,
mask_border_value=mask_border_value,
seg_ignore_label=seg_ignore_label,
interpolation=interpolation)
def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:
"""Get the homography matrix for TranslateX."""
mag = int(results['img_shape'][1] * mag)
return np.array([[1, 0, mag], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
def _transform_img(self, results: dict, mag: float) -> None:
"""Translate the image horizontally."""
mag = int(results['img_shape'][1] * mag)
results['img'] = mmcv.imtranslate(
results['img'],
mag,
direction='horizontal',
border_value=self.img_border_value,
interpolation=self.interpolation)
def _transform_masks(self, results: dict, mag: float) -> None:
"""Translate the masks horizontally."""
mag = int(results['img_shape'][1] * mag)
results['gt_masks'] = results['gt_masks'].translate(
results['img_shape'],
mag,
direction='horizontal',
border_value=self.mask_border_value,
interpolation=self.interpolation)
def _transform_seg(self, results: dict, mag: float) -> None:
"""Translate the segmentation map horizontally."""
mag = int(results['img_shape'][1] * mag)
results['gt_seg_map'] = mmcv.imtranslate(
results['gt_seg_map'],
mag,
direction='horizontal',
border_value=self.seg_ignore_label,
interpolation='nearest')
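# Worked example (illustrative, not part of the original module): for an
# image of width 640 and a sampled magnitude of 0.1, the offset becomes
# int(640 * 0.1) == 64 pixels, and the recorded homography is
#   [[1, 0, 64], [0, 1, 0], [0, 0, 1]]
# i.e. every point (x, y) is mapped to (x + 64, y); a negative magnitude
# (chosen via ``reversal_prob``) shifts the content to the left instead.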
@TRANSFORMS.register_module()
class TranslateY(GeomTransform):
"""Translate the images, bboxes, masks and segmentation map vertically.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- gt_bboxes
- gt_masks
- gt_seg_map
Added Keys:
- homography_matrix
Args:
        prob (float): The probability of performing the transformation,
            which should be in range [0, 1]. Defaults to 1.0.
level (int, optional): The level should be in range [0, _MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
        min_mag (float): The minimum vertical translation offset, expressed
            as a ratio of the image height. Defaults to 0.0.
        max_mag (float): The maximum vertical translation offset, expressed
            as a ratio of the image height. Defaults to 0.1.
reversal_prob (float): The probability that reverses the vertical
translation magnitude. Should be in range [0,1]. Defaults to 0.5.
img_border_value (int | float | tuple): The filled values for
image border. If float, the same fill value will be used for
all the three channels of image. If tuple, it should be 3 elements.
Defaults to 128.
mask_border_value (int): The fill value used for masks. Defaults to 0.
        seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
            of the corresponding config. Defaults to 255.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear" for 'pillow' backend. Defaults
to 'bilinear'.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.0,
max_mag: float = 0.1,
reversal_prob: float = 0.5,
img_border_value: Union[int, float, tuple] = 128,
mask_border_value: int = 0,
seg_ignore_label: int = 255,
interpolation: str = 'bilinear') -> None:
assert 0. <= min_mag <= 1., \
f'min_mag ratio for TranslateY should be ' \
f'in range [0,1], got {min_mag}.'
assert 0. <= max_mag <= 1., \
f'max_mag ratio for TranslateY should be ' \
f'in range [0,1], got {max_mag}.'
super().__init__(
prob=prob,
level=level,
min_mag=min_mag,
max_mag=max_mag,
reversal_prob=reversal_prob,
img_border_value=img_border_value,
mask_border_value=mask_border_value,
seg_ignore_label=seg_ignore_label,
interpolation=interpolation)
def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:
"""Get the homography matrix for TranslateY."""
mag = int(results['img_shape'][0] * mag)
return np.array([[1, 0, 0], [0, 1, mag], [0, 0, 1]], dtype=np.float32)
def _transform_img(self, results: dict, mag: float) -> None:
"""Translate the image vertically."""
mag = int(results['img_shape'][0] * mag)
results['img'] = mmcv.imtranslate(
results['img'],
mag,
direction='vertical',
border_value=self.img_border_value,
interpolation=self.interpolation)
def _transform_masks(self, results: dict, mag: float) -> None:
"""Translate masks vertically."""
mag = int(results['img_shape'][0] * mag)
results['gt_masks'] = results['gt_masks'].translate(
results['img_shape'],
mag,
direction='vertical',
border_value=self.mask_border_value,
interpolation=self.interpolation)
def _transform_seg(self, results: dict, mag: float) -> None:
"""Translate segmentation map vertically."""
mag = int(results['img_shape'][0] * mag)
results['gt_seg_map'] = mmcv.imtranslate(
results['gt_seg_map'],
mag,
direction='vertical',
border_value=self.seg_ignore_label,
interpolation='nearest')
| 30,322 | 39.162914 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/transforms/wrappers.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Callable, Dict, List, Optional, Union
import numpy as np
from mmcv.transforms import BaseTransform, Compose
from mmcv.transforms.utils import cache_random_params, cache_randomness
from mmdet.registry import TRANSFORMS
@TRANSFORMS.register_module()
class MultiBranch(BaseTransform):
r"""Multiple branch pipeline wrapper.
    Generate multiple data-augmented versions of the same image.
    ``MultiBranch`` needs to be given the branch names of all pipelines of
    the dataset; it performs the corresponding data augmentation for the
    current branch and returns ``None`` placeholders for the other branches,
    which keeps the return format consistent across different samples.
Args:
branch_field (list): List of branch names.
branch_pipelines (dict): Dict of different pipeline configs
to be composed.
Examples:
>>> branch_field = ['sup', 'unsup_teacher', 'unsup_student']
>>> sup_pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='LoadAnnotations', with_bbox=True),
>>> dict(type='Resize', scale=(1333, 800), keep_ratio=True),
>>> dict(type='RandomFlip', prob=0.5),
>>> dict(
>>> type='MultiBranch',
>>> branch_field=branch_field,
>>> sup=dict(type='PackDetInputs'))
>>> ]
>>> weak_pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='LoadAnnotations', with_bbox=True),
>>> dict(type='Resize', scale=(1333, 800), keep_ratio=True),
>>> dict(type='RandomFlip', prob=0.0),
>>> dict(
>>> type='MultiBranch',
>>> branch_field=branch_field,
>>> sup=dict(type='PackDetInputs'))
>>> ]
>>> strong_pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='LoadAnnotations', with_bbox=True),
>>> dict(type='Resize', scale=(1333, 800), keep_ratio=True),
>>> dict(type='RandomFlip', prob=1.0),
>>> dict(
>>> type='MultiBranch',
>>> branch_field=branch_field,
>>> sup=dict(type='PackDetInputs'))
>>> ]
>>> unsup_pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='LoadEmptyAnnotations'),
>>> dict(
>>> type='MultiBranch',
>>> branch_field=branch_field,
>>> unsup_teacher=weak_pipeline,
>>> unsup_student=strong_pipeline)
>>> ]
>>> from mmcv.transforms import Compose
>>> sup_branch = Compose(sup_pipeline)
>>> unsup_branch = Compose(unsup_pipeline)
>>> print(sup_branch)
>>> Compose(
>>> LoadImageFromFile(ignore_empty=False, to_float32=False, color_type='color', imdecode_backend='cv2') # noqa
>>> LoadAnnotations(with_bbox=True, with_label=True, with_mask=False, with_seg=False, poly2mask=True, imdecode_backend='cv2') # noqa
        >>>     Resize(scale=(1333, 800), scale_factor=None, keep_ratio=True, clip_object_border=True, backend=cv2, interpolation=bilinear) # noqa
>>> RandomFlip(prob=0.5, direction=horizontal)
>>> MultiBranch(branch_pipelines=['sup'])
>>> )
>>> print(unsup_branch)
>>> Compose(
>>> LoadImageFromFile(ignore_empty=False, to_float32=False, color_type='color', imdecode_backend='cv2') # noqa
>>> LoadEmptyAnnotations(with_bbox=True, with_label=True, with_mask=False, with_seg=False, seg_ignore_label=255) # noqa
>>> MultiBranch(branch_pipelines=['unsup_teacher', 'unsup_student'])
>>> )
"""
def __init__(self, branch_field: List[str],
**branch_pipelines: dict) -> None:
self.branch_field = branch_field
self.branch_pipelines = {
branch: Compose(pipeline)
for branch, pipeline in branch_pipelines.items()
}
def transform(self, results: dict) -> dict:
"""Transform function to apply transforms sequentially.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict:
- 'inputs' (Dict[str, obj:`torch.Tensor`]): The forward data of
models from different branches.
            - 'data_samples' (Dict[str, obj:`DetDataSample`]): The annotation
                info of the sample from different branches.
"""
multi_results = {}
for branch in self.branch_field:
multi_results[branch] = {'inputs': None, 'data_samples': None}
for branch, pipeline in self.branch_pipelines.items():
branch_results = pipeline(copy.deepcopy(results))
# If one branch pipeline returns None,
# it will sample another data from dataset.
if branch_results is None:
return None
multi_results[branch] = branch_results
format_results = {}
for branch, results in multi_results.items():
for key in results.keys():
if format_results.get(key, None) is None:
format_results[key] = {branch: results[key]}
else:
format_results[key][branch] = results[key]
return format_results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(branch_pipelines={list(self.branch_pipelines.keys())})'
return repr_str
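# Illustrative note (not part of the original module): for
# ``branch_field=['sup', 'unsup_teacher', 'unsup_student']`` and a pipeline
# that only defines the 'sup' branch, ``transform`` returns a dict regrouped
# by key first and branch second, e.g.
#   {'inputs':       {'sup': <packed image tensor>, 'unsup_teacher': None, ...},
#    'data_samples': {'sup': <DetDataSample>, 'unsup_teacher': None, ...}}
# Branches without a pipeline keep ``None`` placeholders, so the downstream
# collate step always sees the same set of branch names.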
@TRANSFORMS.register_module()
class RandomOrder(Compose):
"""Shuffle the transform Sequence."""
@cache_randomness
def _random_permutation(self):
return np.random.permutation(len(self.transforms))
def transform(self, results: Dict) -> Optional[Dict]:
"""Transform function to apply transforms in random order.
Args:
results (dict): A result dict contains the results to transform.
Returns:
dict or None: Transformed results.
"""
inds = self._random_permutation()
for idx in inds:
t = self.transforms[idx]
results = t(results)
if results is None:
return None
return results
def __repr__(self):
"""Compute the string representation."""
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += f'{t.__class__.__name__}, '
format_string += ')'
return format_string
@TRANSFORMS.register_module()
class ProposalBroadcaster(BaseTransform):
"""A transform wrapper to apply the wrapped transforms to process both
    `gt_bboxes` and `proposals` without adding any extra code. It will do the
following steps:
    1. Scatter the broadcasting targets to a list of inputs of the wrapped
        transforms. The type of the list should be list[dict, dict], in
        which the first item is the original input and the second is the
        processed result whose `gt_bboxes` are rewritten by the `proposals`.
    2. Apply ``self.transforms`` with the same random parameters, which are
        shared via a context manager. The type of the outputs is a
        list[dict, dict].
    3. Gather the outputs, and update the `proposals` in the first item of
        the outputs with the `gt_bboxes` in the second.
Args:
transforms (list, optional): Sequence of transform
object or config dict to be wrapped. Defaults to [].
Note: The `TransformBroadcaster` in MMCV can achieve the same operation as
    `ProposalBroadcaster`, but it needs more complex parameters to be set.
Examples:
>>> pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='LoadProposals', num_max_proposals=2000),
>>> dict(type='LoadAnnotations', with_bbox=True),
>>> dict(
>>> type='ProposalBroadcaster',
>>> transforms=[
>>> dict(type='Resize', scale=(1333, 800),
>>> keep_ratio=True),
>>> dict(type='RandomFlip', prob=0.5),
>>> ]),
>>> dict(type='PackDetInputs')]
"""
def __init__(self, transforms: List[Union[dict, Callable]] = []) -> None:
self.transforms = Compose(transforms)
def transform(self, results: dict) -> dict:
"""Apply wrapped transform functions to process both `gt_bboxes` and
`proposals`.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Updated result dict.
"""
assert results.get('proposals', None) is not None, \
'`proposals` should be in the results, please delete ' \
'`ProposalBroadcaster` in your configs, or check whether ' \
            'you have loaded proposals successfully.'
inputs = self._process_input(results)
outputs = self._apply_transforms(inputs)
outputs = self._process_output(outputs)
return outputs
def _process_input(self, data: dict) -> list:
"""Scatter the broadcasting targets to a list of inputs of the wrapped
transforms.
Args:
data (dict): The original input data.
Returns:
list[dict]: A list of input data.
"""
cp_data = copy.deepcopy(data)
cp_data['gt_bboxes'] = cp_data['proposals']
scatters = [data, cp_data]
return scatters
def _apply_transforms(self, inputs: list) -> list:
"""Apply ``self.transforms``.
Args:
inputs (list[dict, dict]): list of input data.
Returns:
list[dict]: The output of the wrapped pipeline.
"""
assert len(inputs) == 2
ctx = cache_random_params
with ctx(self.transforms):
output_scatters = [self.transforms(_input) for _input in inputs]
return output_scatters
def _process_output(self, output_scatters: list) -> dict:
"""Gathering and renaming data items.
Args:
output_scatters (list[dict, dict]): The output of the wrapped
pipeline.
Returns:
dict: Updated result dict.
"""
assert isinstance(output_scatters, list) and \
isinstance(output_scatters[0], dict) and \
len(output_scatters) == 2
outputs = output_scatters[0]
outputs['proposals'] = output_scatters[1]['gt_bboxes']
return outputs
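# --- Illustrative sketch (added for documentation; not part of the original
# module): the scatter/gather pattern used by ``ProposalBroadcaster``, shown
# on a plain dict with an arbitrary callable so the data flow is visible
# without building registered transforms. ``shared_transform`` is
# hypothetical and stands in for the wrapped pipeline.
def _demo_proposal_broadcast_flow(results: dict, shared_transform) -> dict:
    """Apply ``shared_transform`` to both ``gt_bboxes`` and ``proposals``."""
    cp_results = copy.deepcopy(results)
    # Scatter: let the proposals take the place of the gt boxes.
    cp_results['gt_bboxes'] = cp_results['proposals']
    outputs = [shared_transform(results), shared_transform(cp_results)]
    # Gather: copy the transformed proposals back into the first output.
    # (The real wrapper additionally shares random parameters between the
    # two calls via ``cache_random_params``.)
    outputs[0]['proposals'] = outputs[1]['gt_bboxes']
    return outputs[0]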
| 10,689 | 37.453237 | 148 |
py
|
ERD
|
ERD-main/mmdet/datasets/transforms/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader'
]
| 2,191 | 58.243243 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/transforms/colorspace.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import mmcv
import numpy as np
from mmcv.transforms import BaseTransform
from mmcv.transforms.utils import cache_randomness
from mmdet.registry import TRANSFORMS
from .augment_wrappers import _MAX_LEVEL, level_to_mag
@TRANSFORMS.register_module()
class ColorTransform(BaseTransform):
"""Base class for color transformations. All color transformations need to
inherit from this base class. ``ColorTransform`` unifies the class
attributes and class functions of color transformations (Color, Brightness,
Contrast, Sharpness, Solarize, SolarizeAdd, Equalize, AutoContrast, Invert,
and Posterize), and only distort color channels, without impacting the
locations of the instances.
Required Keys:
- img
Modified Keys:
- img
Args:
        prob (float): The probability of performing the color
            transformation, which should be in range [0, 1]. Defaults to 1.0.
level (int, optional): The level should be in range [0, _MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum magnitude for color transformation.
Defaults to 0.1.
max_mag (float): The maximum magnitude for color transformation.
Defaults to 1.9.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.1,
max_mag: float = 1.9) -> None:
assert 0 <= prob <= 1.0, f'The probability of the transformation ' \
f'should be in range [0,1], got {prob}.'
assert level is None or isinstance(level, int), \
f'The level should be None or type int, got {type(level)}.'
assert level is None or 0 <= level <= _MAX_LEVEL, \
f'The level should be in range [0,{_MAX_LEVEL}], got {level}.'
assert isinstance(min_mag, float), \
f'min_mag should be type float, got {type(min_mag)}.'
assert isinstance(max_mag, float), \
f'max_mag should be type float, got {type(max_mag)}.'
assert min_mag <= max_mag, \
            f'min_mag should be smaller than max_mag, ' \
f'got min_mag={min_mag} and max_mag={max_mag}'
self.prob = prob
self.level = level
self.min_mag = min_mag
self.max_mag = max_mag
def _transform_img(self, results: dict, mag: float) -> None:
"""Transform the image."""
pass
@cache_randomness
def _random_disable(self):
"""Randomly disable the transform."""
return np.random.rand() > self.prob
@cache_randomness
def _get_mag(self):
"""Get the magnitude of the transform."""
return level_to_mag(self.level, self.min_mag, self.max_mag)
def transform(self, results: dict) -> dict:
"""Transform function for images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Transformed results.
"""
if self._random_disable():
return results
mag = self._get_mag()
self._transform_img(results, mag)
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob}, '
repr_str += f'level={self.level}, '
repr_str += f'min_mag={self.min_mag}, '
repr_str += f'max_mag={self.max_mag})'
return repr_str
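# --- Illustrative sketch (added for documentation; not part of the original
# module): a hypothetical subclass showing how a new color op plugs into
# ``ColorTransform``. Only ``_transform_img`` needs to be overridden; the
# probability handling and level-to-magnitude mapping are inherited. It is
# intentionally left unregistered.
class _ExampleGain(ColorTransform):
    """Scale pixel intensities by the sampled magnitude (illustrative)."""
    def _transform_img(self, results: dict, mag: float) -> None:
        img = results['img']
        results['img'] = np.clip(img.astype(np.float32) * mag, 0,
                                 255).astype(img.dtype)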
@TRANSFORMS.register_module()
class Color(ColorTransform):
"""Adjust the color balance of the image, in a manner similar to the
controls on a colour TV set. A magnitude=0 gives a black & white image,
whereas magnitude=1 gives the original image. The bboxes, masks and
segmentations are not modified.
Required Keys:
- img
Modified Keys:
- img
Args:
prob (float): The probability for performing Color transformation.
Defaults to 1.0.
level (int, optional): Should be in range [0,_MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum magnitude for Color transformation.
Defaults to 0.1.
max_mag (float): The maximum magnitude for Color transformation.
Defaults to 1.9.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.1,
max_mag: float = 1.9) -> None:
assert 0. <= min_mag <= 2.0, \
f'min_mag for Color should be in range [0,2], got {min_mag}.'
assert 0. <= max_mag <= 2.0, \
f'max_mag for Color should be in range [0,2], got {max_mag}.'
super().__init__(
prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)
def _transform_img(self, results: dict, mag: float) -> None:
"""Apply Color transformation to image."""
        # NOTE: by default the image is expected to be in BGR format
img = results['img']
results['img'] = mmcv.adjust_color(img, mag).astype(img.dtype)
@TRANSFORMS.register_module()
class Brightness(ColorTransform):
"""Adjust the brightness of the image. A magnitude=0 gives a black image,
whereas magnitude=1 gives the original image. The bboxes, masks and
segmentations are not modified.
Required Keys:
- img
Modified Keys:
- img
Args:
prob (float): The probability for performing Brightness transformation.
Defaults to 1.0.
level (int, optional): Should be in range [0,_MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum magnitude for Brightness transformation.
Defaults to 0.1.
max_mag (float): The maximum magnitude for Brightness transformation.
Defaults to 1.9.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.1,
max_mag: float = 1.9) -> None:
assert 0. <= min_mag <= 2.0, \
f'min_mag for Brightness should be in range [0,2], got {min_mag}.'
assert 0. <= max_mag <= 2.0, \
f'max_mag for Brightness should be in range [0,2], got {max_mag}.'
super().__init__(
prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)
def _transform_img(self, results: dict, mag: float) -> None:
"""Adjust the brightness of image."""
img = results['img']
results['img'] = mmcv.adjust_brightness(img, mag).astype(img.dtype)
@TRANSFORMS.register_module()
class Contrast(ColorTransform):
"""Control the contrast of the image. A magnitude=0 gives a gray image,
whereas magnitude=1 gives the original imageThe bboxes, masks and
segmentations are not modified.
Required Keys:
- img
Modified Keys:
- img
Args:
prob (float): The probability for performing Contrast transformation.
Defaults to 1.0.
level (int, optional): Should be in range [0,_MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum magnitude for Contrast transformation.
Defaults to 0.1.
max_mag (float): The maximum magnitude for Contrast transformation.
Defaults to 1.9.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.1,
max_mag: float = 1.9) -> None:
assert 0. <= min_mag <= 2.0, \
f'min_mag for Contrast should be in range [0,2], got {min_mag}.'
assert 0. <= max_mag <= 2.0, \
f'max_mag for Contrast should be in range [0,2], got {max_mag}.'
super().__init__(
prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)
def _transform_img(self, results: dict, mag: float) -> None:
"""Adjust the image contrast."""
img = results['img']
results['img'] = mmcv.adjust_contrast(img, mag).astype(img.dtype)
@TRANSFORMS.register_module()
class Sharpness(ColorTransform):
"""Adjust images sharpness. A positive magnitude would enhance the
sharpness and a negative magnitude would make the image blurry. A
magnitude=0 gives the origin img.
Required Keys:
- img
Modified Keys:
- img
Args:
prob (float): The probability for performing Sharpness transformation.
Defaults to 1.0.
level (int, optional): Should be in range [0,_MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum magnitude for Sharpness transformation.
Defaults to 0.1.
max_mag (float): The maximum magnitude for Sharpness transformation.
Defaults to 1.9.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.1,
max_mag: float = 1.9) -> None:
assert 0. <= min_mag <= 2.0, \
f'min_mag for Sharpness should be in range [0,2], got {min_mag}.'
assert 0. <= max_mag <= 2.0, \
f'max_mag for Sharpness should be in range [0,2], got {max_mag}.'
super().__init__(
prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)
def _transform_img(self, results: dict, mag: float) -> None:
"""Adjust the image sharpness."""
img = results['img']
results['img'] = mmcv.adjust_sharpness(img, mag).astype(img.dtype)
@TRANSFORMS.register_module()
class Solarize(ColorTransform):
"""Solarize images (Invert all pixels above a threshold value of
magnitude.).
Required Keys:
- img
Modified Keys:
- img
Args:
prob (float): The probability for performing Solarize transformation.
Defaults to 1.0.
level (int, optional): Should be in range [0,_MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum magnitude for Solarize transformation.
Defaults to 0.0.
max_mag (float): The maximum magnitude for Solarize transformation.
Defaults to 256.0.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.0,
max_mag: float = 256.0) -> None:
assert 0. <= min_mag <= 256.0, f'min_mag for Solarize should be ' \
f'in range [0, 256], got {min_mag}.'
assert 0. <= max_mag <= 256.0, f'max_mag for Solarize should be ' \
f'in range [0, 256], got {max_mag}.'
super().__init__(
prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)
def _transform_img(self, results: dict, mag: float) -> None:
"""Invert all pixel values above magnitude."""
img = results['img']
results['img'] = mmcv.solarize(img, mag).astype(img.dtype)
@TRANSFORMS.register_module()
class SolarizeAdd(ColorTransform):
"""SolarizeAdd images. For each pixel in the image that is less than 128,
add an additional amount to it decided by the magnitude.
Required Keys:
- img
Modified Keys:
- img
Args:
prob (float): The probability for performing SolarizeAdd
transformation. Defaults to 1.0.
level (int, optional): Should be in range [0,_MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum magnitude for SolarizeAdd transformation.
Defaults to 0.0.
max_mag (float): The maximum magnitude for SolarizeAdd transformation.
Defaults to 110.0.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.0,
max_mag: float = 110.0) -> None:
assert 0. <= min_mag <= 110.0, f'min_mag for SolarizeAdd should be ' \
f'in range [0, 110], got {min_mag}.'
assert 0. <= max_mag <= 110.0, f'max_mag for SolarizeAdd should be ' \
f'in range [0, 110], got {max_mag}.'
super().__init__(
prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)
def _transform_img(self, results: dict, mag: float) -> None:
"""SolarizeAdd the image."""
img = results['img']
img_solarized = np.where(img < 128, np.minimum(img + mag, 255), img)
results['img'] = img_solarized.astype(img.dtype)
@TRANSFORMS.register_module()
class Posterize(ColorTransform):
"""Posterize images (reduce the number of bits for each color channel).
Required Keys:
- img
Modified Keys:
- img
Args:
prob (float): The probability for performing Posterize
transformation. Defaults to 1.0.
level (int, optional): Should be in range [0,_MAX_LEVEL].
If level is None, it will generate from [0, _MAX_LEVEL] randomly.
Defaults to None.
min_mag (float): The minimum magnitude for Posterize transformation.
Defaults to 0.0.
max_mag (float): The maximum magnitude for Posterize transformation.
Defaults to 4.0.
"""
def __init__(self,
prob: float = 1.0,
level: Optional[int] = None,
min_mag: float = 0.0,
max_mag: float = 4.0) -> None:
assert 0. <= min_mag <= 8.0, f'min_mag for Posterize should be ' \
f'in range [0, 8], got {min_mag}.'
assert 0. <= max_mag <= 8.0, f'max_mag for Posterize should be ' \
f'in range [0, 8], got {max_mag}.'
super().__init__(
prob=prob, level=level, min_mag=min_mag, max_mag=max_mag)
def _transform_img(self, results: dict, mag: float) -> None:
"""Posterize the image."""
img = results['img']
results['img'] = mmcv.posterize(img, math.ceil(mag)).astype(img.dtype)
@TRANSFORMS.register_module()
class Equalize(ColorTransform):
"""Equalize the image histogram. The bboxes, masks and segmentations are
not modified.
Required Keys:
- img
Modified Keys:
- img
Args:
prob (float): The probability for performing Equalize transformation.
Defaults to 1.0.
level (int, optional): No use for Equalize transformation.
Defaults to None.
min_mag (float): No use for Equalize transformation. Defaults to 0.1.
max_mag (float): No use for Equalize transformation. Defaults to 1.9.
"""
def _transform_img(self, results: dict, mag: float) -> None:
"""Equalizes the histogram of one image."""
img = results['img']
results['img'] = mmcv.imequalize(img).astype(img.dtype)
@TRANSFORMS.register_module()
class AutoContrast(ColorTransform):
"""Auto adjust image contrast.
Required Keys:
- img
Modified Keys:
- img
Args:
        prob (float): The probability for performing AutoContrast, which
            should be in range [0, 1]. Defaults to 1.0.
level (int, optional): No use for AutoContrast transformation.
Defaults to None.
min_mag (float): No use for AutoContrast transformation.
Defaults to 0.1.
max_mag (float): No use for AutoContrast transformation.
Defaults to 1.9.
"""
def _transform_img(self, results: dict, mag: float) -> None:
"""Auto adjust image contrast."""
img = results['img']
results['img'] = mmcv.auto_contrast(img).astype(img.dtype)
@TRANSFORMS.register_module()
class Invert(ColorTransform):
"""Invert images.
Required Keys:
- img
Modified Keys:
- img
Args:
        prob (float): The probability for performing Invert transformation,
            which should be in range [0, 1]. Defaults to 1.0.
level (int, optional): No use for Invert transformation.
Defaults to None.
min_mag (float): No use for Invert transformation. Defaults to 0.1.
max_mag (float): No use for Invert transformation. Defaults to 1.9.
"""
def _transform_img(self, results: dict, mag: float) -> None:
"""Invert the image."""
img = results['img']
results['img'] = mmcv.iminvert(img).astype(img.dtype)
| 16,984 | 33.382591 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/transforms/augment_wrappers.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Union
import numpy as np
from mmcv.transforms import RandomChoice
from mmcv.transforms.utils import cache_randomness
from mmengine.config import ConfigDict
from mmdet.registry import TRANSFORMS
# AutoAugment uses reinforcement learning to search for
# some widely useful data augmentation strategies,
# here we provide AUTOAUG_POLICIES_V0.
# For AUTOAUG_POLICIES_V0, each tuple is an augmentation
# operation of the form (operation, probability, magnitude).
# Each element in policies is a policy that will be applied
# sequentially on the image.
# RandAugment defines a data augmentation search space, RANDAUG_SPACE,
# sampling 1~3 data augmentations each time, and
# setting the magnitude of each data augmentation randomly,
# which will be applied sequentially on the image.
_MAX_LEVEL = 10
AUTOAUG_POLICIES_V0 = [
[('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
[('Color', 0.4, 9), ('Equalize', 0.6, 3)],
[('Color', 0.4, 1), ('Rotate', 0.6, 8)],
[('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
[('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
[('Color', 0.2, 0), ('Equalize', 0.8, 8)],
[('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
[('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
[('Color', 0.6, 1), ('Equalize', 1.0, 2)],
[('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
[('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
[('Color', 0.4, 7), ('Equalize', 0.6, 0)],
[('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
[('Solarize', 0.6, 8), ('Color', 0.6, 9)],
[('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
[('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
[('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
[('ShearY', 0.8, 0), ('Color', 0.6, 4)],
[('Color', 1.0, 0), ('Rotate', 0.6, 2)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
[('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
[('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
[('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
[('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
[('Color', 0.8, 6), ('Rotate', 0.4, 5)],
]
def policies_v0():
"""Autoaugment policies that was used in AutoAugment Paper."""
policies = list()
for policy_args in AUTOAUG_POLICIES_V0:
policy = list()
for args in policy_args:
policy.append(dict(type=args[0], prob=args[1], level=args[2]))
policies.append(policy)
return policies
RANDAUG_SPACE = [[dict(type='AutoContrast')], [dict(type='Equalize')],
[dict(type='Invert')], [dict(type='Rotate')],
[dict(type='Posterize')], [dict(type='Solarize')],
[dict(type='SolarizeAdd')], [dict(type='Color')],
[dict(type='Contrast')], [dict(type='Brightness')],
[dict(type='Sharpness')], [dict(type='ShearX')],
[dict(type='ShearY')], [dict(type='TranslateX')],
[dict(type='TranslateY')]]
def level_to_mag(level: Optional[int], min_mag: float,
max_mag: float) -> float:
"""Map from level to magnitude."""
if level is None:
return round(np.random.rand() * (max_mag - min_mag) + min_mag, 1)
else:
return round(level / _MAX_LEVEL * (max_mag - min_mag) + min_mag, 1)
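# Worked example (illustrative): with min_mag=0.0 and max_mag=30.0,
#   level_to_mag(5, 0.0, 30.0)    == round(5 / 10 * 30.0, 1) == 15.0
#   level_to_mag(10, 0.0, 30.0)   == 30.0
#   level_to_mag(None, 0.0, 30.0) -> a random value in [0.0, 30.0]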
@TRANSFORMS.register_module()
class AutoAugment(RandomChoice):
"""Auto augmentation.
This data augmentation is proposed in `AutoAugment: Learning
Augmentation Policies from Data <https://arxiv.org/abs/1805.09501>`_
and in `Learning Data Augmentation Strategies for Object Detection
<https://arxiv.org/pdf/1906.11172>`_.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_ignore_flags (bool) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- img_shape
- gt_bboxes
- gt_bboxes_labels
- gt_masks
- gt_ignore_flags
- gt_seg_map
Added Keys:
- homography_matrix
Args:
policies (List[List[Union[dict, ConfigDict]]]):
            The policies of auto augmentation. Each policy in ``policies``
            is a specific augmentation policy, and is composed of several
            augmentations. When AutoAugment is called, a random policy in
            ``policies`` will be selected to augment images.
            Defaults to ``policies_v0()``.
prob (list[float], optional): The probabilities associated
with each policy. The length should be equal to the policy
number and the sum should be 1. If not given, a uniform
distribution will be assumed. Defaults to None.
Examples:
>>> policies = [
>>> [
>>> dict(type='Sharpness', prob=0.0, level=8),
>>> dict(type='ShearX', prob=0.4, level=0,)
>>> ],
>>> [
>>> dict(type='Rotate', prob=0.6, level=10),
>>> dict(type='Color', prob=1.0, level=6)
>>> ]
>>> ]
>>> augmentation = AutoAugment(policies)
        >>> img = np.ones((100, 100, 3))
        >>> gt_bboxes = np.ones((10, 4))
>>> results = dict(img=img, gt_bboxes=gt_bboxes)
>>> results = augmentation(results)
"""
def __init__(self,
policies: List[List[Union[dict, ConfigDict]]] = policies_v0(),
prob: Optional[List[float]] = None) -> None:
assert isinstance(policies, list) and len(policies) > 0, \
'Policies must be a non-empty list.'
for policy in policies:
assert isinstance(policy, list) and len(policy) > 0, \
'Each policy in policies must be a non-empty list.'
for augment in policy:
assert isinstance(augment, dict) and 'type' in augment, \
'Each specific augmentation must be a dict with key' \
' "type".'
super().__init__(transforms=policies, prob=prob)
self.policies = policies
def __repr__(self) -> str:
return f'{self.__class__.__name__}(policies={self.policies}, ' \
f'prob={self.prob})'
@TRANSFORMS.register_module()
class RandAugment(RandomChoice):
"""Rand augmentation.
This data augmentation is proposed in `RandAugment:
Practical automated data augmentation with a reduced
search space <https://arxiv.org/abs/1909.13719>`_.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_ignore_flags (bool) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- img_shape
- gt_bboxes
- gt_bboxes_labels
- gt_masks
- gt_ignore_flags
- gt_seg_map
Added Keys:
- homography_matrix
Args:
        aug_space (List[List[Union[dict, ConfigDict]]]): The augmentation space
            of rand augmentation. Each augmentation transform in ``aug_space``
            is a specific transform, and is composed of several augmentations.
            When RandAugment is called, a random transform in ``aug_space``
            will be selected to augment images. Defaults to ``RANDAUG_SPACE``.
        aug_num (int): Number of augmentations to apply sequentially.
            Defaults to 2.
prob (list[float], optional): The probabilities associated with
each augmentation. The length should be equal to the
augmentation space and the sum should be 1. If not given,
a uniform distribution will be assumed. Defaults to None.
Examples:
        >>> aug_space = [
        >>>     [dict(type='Sharpness')],
        >>>     [dict(type='ShearX')],
        >>>     [dict(type='Color')],
        >>> ]
>>> augmentation = RandAugment(aug_space)
        >>> img = np.ones((100, 100, 3))
        >>> gt_bboxes = np.ones((10, 4))
>>> results = dict(img=img, gt_bboxes=gt_bboxes)
>>> results = augmentation(results)
"""
def __init__(self,
aug_space: List[Union[dict, ConfigDict]] = RANDAUG_SPACE,
aug_num: int = 2,
prob: Optional[List[float]] = None) -> None:
assert isinstance(aug_space, list) and len(aug_space) > 0, \
'Augmentation space must be a non-empty list.'
for aug in aug_space:
            assert isinstance(aug, list) and len(aug) == 1, \
                'Each augmentation in aug_space must be a list of length 1.'
for transform in aug:
assert isinstance(transform, dict) and 'type' in transform, \
'Each specific transform must be a dict with key' \
' "type".'
super().__init__(transforms=aug_space, prob=prob)
self.aug_space = aug_space
self.aug_num = aug_num
@cache_randomness
def random_pipeline_index(self):
indices = np.arange(len(self.transforms))
return np.random.choice(
indices, self.aug_num, p=self.prob, replace=False)
def transform(self, results: dict) -> dict:
"""Transform function to use RandAugment.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with RandAugment.
"""
for idx in self.random_pipeline_index():
results = self.transforms[idx](results)
return results
def __repr__(self) -> str:
return f'{self.__class__.__name__}(' \
f'aug_space={self.aug_space}, '\
f'aug_num={self.aug_num}, ' \
f'prob={self.prob})'
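# --- Illustrative usage sketch (added for documentation; not part of the
# original module). It uses a tiny color-only search space so a dummy image
# is the only required key, and it assumes the mmdet transforms have been
# registered (e.g. via ``mmdet.utils.register_all_modules()``); the names
# below are hypothetical.
def _demo_rand_augment_usage():
    """Minimal sketch: apply two random color ops to a dummy image."""
    aug_space = [[dict(type='Color')], [dict(type='Brightness')],
                 [dict(type='Sharpness')]]
    augmentation = RandAugment(aug_space=aug_space, aug_num=2)
    results = dict(img=np.zeros((64, 64, 3), dtype=np.uint8))
    return augmentation(results)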
| 9,738 | 35.750943 | 79 |
py
|
ERD
|
ERD-main/mmdet/datasets/transforms/transforms.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
import math
from typing import List, Optional, Sequence, Tuple, Union
import cv2
import mmcv
import numpy as np
from mmcv.image.geometric import _scale_size
from mmcv.transforms import BaseTransform
from mmcv.transforms import Pad as MMCV_Pad
from mmcv.transforms import RandomFlip as MMCV_RandomFlip
from mmcv.transforms import Resize as MMCV_Resize
from mmcv.transforms.utils import avoid_cache_randomness, cache_randomness
from mmengine.dataset import BaseDataset
from mmengine.utils import is_str
from numpy import random
from mmdet.registry import TRANSFORMS
from mmdet.structures.bbox import HorizontalBoxes, autocast_box_type
from mmdet.structures.mask import BitmapMasks, PolygonMasks
from mmdet.utils import log_img_scale
try:
from imagecorruptions import corrupt
except ImportError:
corrupt = None
try:
import albumentations
from albumentations import Compose
except ImportError:
albumentations = None
Compose = None
Number = Union[int, float]
@TRANSFORMS.register_module()
class Resize(MMCV_Resize):
"""Resize images & bbox & seg.
This transform resizes the input image according to ``scale`` or
``scale_factor``. Bboxes, masks, and seg map are then resized
with the same scale factor.
    If ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to
    resize.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- img_shape
- gt_bboxes
- gt_masks
- gt_seg_map
Added Keys:
- scale
- scale_factor
- keep_ratio
- homography_matrix
Args:
        scale (int or tuple): Image scales for resizing. Defaults to None.
scale_factor (float or tuple[float]): Scale factors for resizing.
Defaults to None.
keep_ratio (bool): Whether to keep the aspect ratio when resizing the
image. Defaults to False.
clip_object_border (bool): Whether to clip the objects
            outside the border of the image. In some datasets like MOT17, the gt
bboxes are allowed to cross the border of images. Therefore, we
don't need to clip the gt bboxes in these cases. Defaults to True.
backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
            These two backends generate slightly different results. Defaults
to 'cv2'.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear" for 'pillow' backend. Defaults
to 'bilinear'.
"""
def _resize_masks(self, results: dict) -> None:
"""Resize masks with ``results['scale']``"""
if results.get('gt_masks', None) is not None:
if self.keep_ratio:
results['gt_masks'] = results['gt_masks'].rescale(
results['scale'])
else:
results['gt_masks'] = results['gt_masks'].resize(
results['img_shape'])
def _resize_bboxes(self, results: dict) -> None:
"""Resize bounding boxes with ``results['scale_factor']``."""
if results.get('gt_bboxes', None) is not None:
results['gt_bboxes'].rescale_(results['scale_factor'])
if self.clip_object_border:
results['gt_bboxes'].clip_(results['img_shape'])
def _resize_seg(self, results: dict) -> None:
"""Resize semantic segmentation map with ``results['scale']``."""
if results.get('gt_seg_map', None) is not None:
if self.keep_ratio:
gt_seg = mmcv.imrescale(
results['gt_seg_map'],
results['scale'],
interpolation='nearest',
backend=self.backend)
else:
gt_seg = mmcv.imresize(
results['gt_seg_map'],
results['scale'],
interpolation='nearest',
backend=self.backend)
results['gt_seg_map'] = gt_seg
def _record_homography_matrix(self, results: dict) -> None:
"""Record the homography matrix for the Resize."""
w_scale, h_scale = results['scale_factor']
homography_matrix = np.array(
[[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32)
if results.get('homography_matrix', None) is None:
results['homography_matrix'] = homography_matrix
else:
results['homography_matrix'] = homography_matrix @ results[
'homography_matrix']
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Transform function to resize images, bounding boxes and semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',
'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys
are updated in result dict.
"""
if self.scale:
results['scale'] = self.scale
else:
img_shape = results['img'].shape[:2]
results['scale'] = _scale_size(img_shape[::-1], self.scale_factor)
self._resize_img(results)
self._resize_bboxes(results)
self._resize_masks(results)
self._resize_seg(results)
self._record_homography_matrix(results)
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(scale={self.scale}, '
repr_str += f'scale_factor={self.scale_factor}, '
repr_str += f'keep_ratio={self.keep_ratio}, '
        repr_str += f'clip_object_border={self.clip_object_border}, '
        repr_str += f'backend={self.backend}, '
repr_str += f'interpolation={self.interpolation})'
return repr_str
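# --- Illustrative usage sketch (added for documentation; not part of the
# original module). It assumes a dummy 100x200 image and one box; with
# ``keep_ratio=True`` both sides are scaled by the same factor, and that
# factor is also recorded as a 3x3 ``homography_matrix``.
def _demo_resize_usage():
    """Minimal sketch: resize a dummy sample and inspect recorded metadata."""
    results = dict(
        img=np.zeros((100, 200, 3), dtype=np.uint8),
        gt_bboxes=HorizontalBoxes(
            np.array([[10., 10., 60., 40.]], dtype=np.float32)))
    resize = Resize(scale=(400, 200), keep_ratio=True)
    results = resize(results)
    # Expect scale_factor == (2.0, 2.0) and img_shape == (200, 400) here.
    return results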
@TRANSFORMS.register_module()
class FixShapeResize(Resize):
"""Resize images & bbox & seg to the specified size.
This transform resizes the input image according to ``width`` and
``height``. Bboxes, masks, and seg map are then resized
with the same parameters.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- img_shape
- gt_bboxes
- gt_masks
- gt_seg_map
Added Keys:
- scale
- scale_factor
- keep_ratio
- homography_matrix
Args:
        width (int): Width for resizing.
        height (int): Height for resizing.
        pad_val (Number | dict[str, Number], optional): Padding value used if
            the padding mode is "constant". If it is a single number, the value
to pad the image is the number and to pad the semantic
segmentation map is 255. If it is a dict, it should have the
following keys:
- img: The value to pad the image.
- seg: The value to pad the semantic segmentation map.
Defaults to dict(img=0, seg=255).
keep_ratio (bool): Whether to keep the aspect ratio when resizing the
image. Defaults to False.
clip_object_border (bool): Whether to clip the objects
            outside the border of the image. In some datasets like MOT17, the gt
bboxes are allowed to cross the border of images. Therefore, we
don't need to clip the gt bboxes in these cases. Defaults to True.
backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
            These two backends generate slightly different results. Defaults
to 'cv2'.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear" for 'pillow' backend. Defaults
to 'bilinear'.
"""
def __init__(self,
width: int,
height: int,
pad_val: Union[Number, dict] = dict(img=0, seg=255),
keep_ratio: bool = False,
clip_object_border: bool = True,
backend: str = 'cv2',
interpolation: str = 'bilinear') -> None:
assert width is not None and height is not None, (
            '`width` and '
            '`height` can not be `None`')
self.width = width
self.height = height
self.scale = (width, height)
self.backend = backend
self.interpolation = interpolation
self.keep_ratio = keep_ratio
self.clip_object_border = clip_object_border
if keep_ratio is True:
# padding to the fixed size when keep_ratio=True
self.pad_transform = Pad(size=self.scale, pad_val=pad_val)
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Transform function to resize images, bounding boxes and semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',
'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys
are updated in result dict.
"""
img = results['img']
h, w = img.shape[:2]
if self.keep_ratio:
scale_factor = min(self.width / w, self.height / h)
results['scale_factor'] = (scale_factor, scale_factor)
real_w, real_h = int(w * float(scale_factor) +
0.5), int(h * float(scale_factor) + 0.5)
img, scale_factor = mmcv.imrescale(
results['img'], (real_w, real_h),
interpolation=self.interpolation,
return_scale=True,
backend=self.backend)
            # the w_scale and h_scale have a minor difference
# a real fix should be done in the mmcv.imrescale in the future
results['img'] = img
results['img_shape'] = img.shape[:2]
results['keep_ratio'] = self.keep_ratio
results['scale'] = (real_w, real_h)
else:
results['scale'] = (self.width, self.height)
results['scale_factor'] = (self.width / w, self.height / h)
super()._resize_img(results)
self._resize_bboxes(results)
self._resize_masks(results)
self._resize_seg(results)
self._record_homography_matrix(results)
if self.keep_ratio:
self.pad_transform(results)
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(width={self.width}, height={self.height}, '
repr_str += f'keep_ratio={self.keep_ratio}, '
        repr_str += f'clip_object_border={self.clip_object_border}, '
        repr_str += f'backend={self.backend}, '
repr_str += f'interpolation={self.interpolation})'
return repr_str
@TRANSFORMS.register_module()
class RandomFlip(MMCV_RandomFlip):
"""Flip the image & bbox & mask & segmentation map. Added or Updated keys:
flip, flip_direction, img, gt_bboxes, and gt_seg_map. There are 3 flip
modes:
- ``prob`` is float, ``direction`` is string: the image will be
``direction``ly flipped with probability of ``prob`` .
E.g., ``prob=0.5``, ``direction='horizontal'``,
then image will be horizontally flipped with probability of 0.5.
- ``prob`` is float, ``direction`` is list of string: the image will
be ``direction[i]``ly flipped with probability of
``prob/len(direction)``.
E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``,
then image will be horizontally flipped with probability of 0.25,
vertically with probability of 0.25.
- ``prob`` is list of float, ``direction`` is list of string:
given ``len(prob) == len(direction)``, the image will
be ``direction[i]``ly flipped with probability of ``prob[i]``.
E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal',
'vertical']``, then image will be horizontally flipped with
probability of 0.3, vertically with probability of 0.5.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- gt_bboxes
- gt_masks
- gt_seg_map
Added Keys:
- flip
- flip_direction
- homography_matrix
Args:
prob (float | list[float], optional): The flipping probability.
Defaults to None.
        direction (str | list[str]): The flipping direction. Options are
            'horizontal', 'vertical' and 'diagonal'. If input is a list, the
            length must equal that of ``prob``. Each element in ``prob``
            indicates the flip probability of the corresponding direction.
            Defaults to 'horizontal'.
"""
def _record_homography_matrix(self, results: dict) -> None:
"""Record the homography matrix for the RandomFlip."""
cur_dir = results['flip_direction']
h, w = results['img'].shape[:2]
if cur_dir == 'horizontal':
homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]],
dtype=np.float32)
elif cur_dir == 'vertical':
homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]],
dtype=np.float32)
elif cur_dir == 'diagonal':
homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]],
dtype=np.float32)
else:
homography_matrix = np.eye(3, dtype=np.float32)
if results.get('homography_matrix', None) is None:
results['homography_matrix'] = homography_matrix
else:
results['homography_matrix'] = homography_matrix @ results[
'homography_matrix']
@autocast_box_type()
def _flip(self, results: dict) -> None:
"""Flip images, bounding boxes, and semantic segmentation map."""
# flip image
results['img'] = mmcv.imflip(
results['img'], direction=results['flip_direction'])
img_shape = results['img'].shape[:2]
# flip bboxes
if results.get('gt_bboxes', None) is not None:
results['gt_bboxes'].flip_(img_shape, results['flip_direction'])
# flip masks
if results.get('gt_masks', None) is not None:
results['gt_masks'] = results['gt_masks'].flip(
results['flip_direction'])
# flip segs
if results.get('gt_seg_map', None) is not None:
results['gt_seg_map'] = mmcv.imflip(
results['gt_seg_map'], direction=results['flip_direction'])
# record homography matrix for flip
self._record_homography_matrix(results)
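# Worked example (illustrative, not part of the original module): for a
# horizontal flip on a 640-pixel-wide image, ``_record_homography_matrix``
# stores [[-1, 0, 640], [0, 1, 0], [0, 0, 1]], which maps a point (x, y, 1)
# to (640 - x, y, 1), i.e. the same mirroring that ``flip_`` applies to the
# box coordinates.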
@TRANSFORMS.register_module()
class RandomShift(BaseTransform):
"""Shift the image and box given shift pixels and probability.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32])
- gt_bboxes_labels (np.int64)
- gt_ignore_flags (bool) (optional)
Modified Keys:
- img
- gt_bboxes
- gt_bboxes_labels
- gt_ignore_flags (bool) (optional)
Args:
prob (float): Probability of shifts. Defaults to 0.5.
max_shift_px (int): The max pixels for shifting. Defaults to 32.
filter_thr_px (int): The width and height threshold for filtering.
The bbox and the rest of the targets below the width and
height threshold will be filtered. Defaults to 1.
"""
def __init__(self,
prob: float = 0.5,
max_shift_px: int = 32,
filter_thr_px: int = 1) -> None:
assert 0 <= prob <= 1
assert max_shift_px >= 0
self.prob = prob
self.max_shift_px = max_shift_px
self.filter_thr_px = int(filter_thr_px)
@cache_randomness
def _random_prob(self) -> float:
return random.uniform(0, 1)
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Transform function to random shift images, bounding boxes.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Shift results.
"""
if self._random_prob() < self.prob:
img_shape = results['img'].shape[:2]
random_shift_x = random.randint(-self.max_shift_px,
self.max_shift_px)
random_shift_y = random.randint(-self.max_shift_px,
self.max_shift_px)
new_x = max(0, random_shift_x)
ori_x = max(0, -random_shift_x)
new_y = max(0, random_shift_y)
ori_y = max(0, -random_shift_y)
# TODO: support mask and semantic segmentation maps.
bboxes = results['gt_bboxes'].clone()
bboxes.translate_([random_shift_x, random_shift_y])
# clip border
bboxes.clip_(img_shape)
# remove invalid bboxes
valid_inds = (bboxes.widths > self.filter_thr_px).numpy() & (
bboxes.heights > self.filter_thr_px).numpy()
# If the shift does not contain any gt-bbox area, skip this
# image.
if not valid_inds.any():
return results
bboxes = bboxes[valid_inds]
results['gt_bboxes'] = bboxes
results['gt_bboxes_labels'] = results['gt_bboxes_labels'][
valid_inds]
if results.get('gt_ignore_flags', None) is not None:
results['gt_ignore_flags'] = \
results['gt_ignore_flags'][valid_inds]
# shift img
img = results['img']
new_img = np.zeros_like(img)
img_h, img_w = img.shape[:2]
new_h = img_h - np.abs(random_shift_y)
new_w = img_w - np.abs(random_shift_x)
new_img[new_y:new_y + new_h, new_x:new_x + new_w] \
= img[ori_y:ori_y + new_h, ori_x:ori_x + new_w]
results['img'] = new_img
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob}, '
repr_str += f'max_shift_px={self.max_shift_px}, '
repr_str += f'filter_thr_px={self.filter_thr_px})'
return repr_str
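# --- Illustrative usage sketch (added for documentation; not part of the
# original module). It assumes a dummy image plus one box and label; boxes
# pushed outside the image by the shift are clipped and, if they become
# smaller than ``filter_thr_px``, removed together with their labels.
def _demo_random_shift_usage():
    """Minimal sketch: shift a dummy sample by at most 16 pixels."""
    results = dict(
        img=np.zeros((64, 64, 3), dtype=np.uint8),
        gt_bboxes=HorizontalBoxes(
            np.array([[8., 8., 40., 40.]], dtype=np.float32)),
        gt_bboxes_labels=np.array([0], dtype=np.int64))
    shift = RandomShift(prob=1.0, max_shift_px=16)
    return shift(results)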
@TRANSFORMS.register_module()
class Pad(MMCV_Pad):
"""Pad the image & segmentation map.
    There are three padding modes: (1) pad to a fixed size, (2) pad to the
    minimum size that is divisible by some number, and (3) pad to square.
    Pad to square and pad to the minimum divisible size can also be used at
    the same time.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- img_shape
- gt_masks
- gt_seg_map
Added Keys:
- pad_shape
- pad_fixed_size
- pad_size_divisor
Args:
size (tuple, optional): Fixed padding size.
Expected padding shape (width, height). Defaults to None.
size_divisor (int, optional): The divisor of padded size. Defaults to
None.
pad_to_square (bool): Whether to pad the image into a square.
Currently only used for YOLOX. Defaults to False.
        pad_val (Number | dict[str, Number], optional): Padding value used if
            the padding_mode is "constant". If it is a single number, the value
to pad the image is the number and to pad the semantic
segmentation map is 255. If it is a dict, it should have the
following keys:
- img: The value to pad the image.
- seg: The value to pad the semantic segmentation map.
Defaults to dict(img=0, seg=255).
padding_mode (str): Type of padding. Should be: constant, edge,
reflect or symmetric. Defaults to 'constant'.
- constant: pads with a constant value, this value is specified
with pad_val.
- edge: pads with the last value at the edge of the image.
- reflect: pads with reflection of image without repeating the last
value on the edge. For example, padding [1, 2, 3, 4] with 2
elements on both sides in reflect mode will result in
[3, 2, 1, 2, 3, 4, 3, 2].
- symmetric: pads with reflection of image repeating the last value
on the edge. For example, padding [1, 2, 3, 4] with 2 elements on
both sides in symmetric mode will result in
[2, 1, 1, 2, 3, 4, 4, 3]
"""
def _pad_masks(self, results: dict) -> None:
"""Pad masks according to ``results['pad_shape']``."""
if results.get('gt_masks', None) is not None:
pad_val = self.pad_val.get('masks', 0)
pad_shape = results['pad_shape'][:2]
results['gt_masks'] = results['gt_masks'].pad(
pad_shape, pad_val=pad_val)
def transform(self, results: dict) -> dict:
"""Call function to pad images, masks, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Updated result dict.
"""
self._pad_img(results)
self._pad_seg(results)
self._pad_masks(results)
return results
@TRANSFORMS.register_module()
class RandomCrop(BaseTransform):
"""Random crop the image & bboxes & masks.
The absolute ``crop_size`` is sampled based on ``crop_type`` and
``image_size``, then the cropped results are generated.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_ignore_flags (bool) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- img_shape
- gt_bboxes (optional)
- gt_bboxes_labels (optional)
- gt_masks (optional)
- gt_ignore_flags (optional)
- gt_seg_map (optional)
Added Keys:
- homography_matrix
Args:
crop_size (tuple): The relative ratio or absolute pixels of
(width, height).
crop_type (str, optional): One of "relative_range", "relative",
"absolute", "absolute_range". "relative" randomly crops
(h * crop_size[0], w * crop_size[1]) part from an input of size
(h, w). "relative_range" uniformly samples relative crop size from
range [crop_size[0], 1] and [crop_size[1], 1] for height and width
respectively. "absolute" crops from an input with absolute size
(crop_size[0], crop_size[1]). "absolute_range" uniformly samples
crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w
in range [crop_size[0], min(w, crop_size[1])].
Defaults to "absolute".
allow_negative_crop (bool, optional): Whether to allow a crop that does
not contain any bbox area. Defaults to False.
recompute_bbox (bool, optional): Whether to re-compute the boxes based
on cropped instance masks. Defaults to False.
bbox_clip_border (bool, optional): Whether clip the objects outside
the border of the image. Defaults to True.
Note:
- If the image is smaller than the absolute crop size, return the
original image.
- The keys for bboxes, labels and masks must be aligned. That is,
``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and
``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and
``gt_masks_ignore``.
- If the crop does not contain any gt-bbox region and
``allow_negative_crop`` is set to False, skip this image.
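    Example (a minimal pipeline-config sketch; the crop sizes below are
    illustrative assumptions, not recommended settings):
    .. code-block:: python
        train_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            # crop a fixed 384x384 (width, height) patch at a random location
            dict(
                type='RandomCrop',
                crop_size=(384, 384),
                crop_type='absolute'),
            # or, alternatively, sample 60%-100% of each side:
            # dict(
            #     type='RandomCrop',
            #     crop_size=(0.6, 0.6),
            #     crop_type='relative_range')
        ]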
"""
def __init__(self,
crop_size: tuple,
crop_type: str = 'absolute',
allow_negative_crop: bool = False,
recompute_bbox: bool = False,
bbox_clip_border: bool = True) -> None:
if crop_type not in [
'relative_range', 'relative', 'absolute', 'absolute_range'
]:
raise ValueError(f'Invalid crop_type {crop_type}.')
if crop_type in ['absolute', 'absolute_range']:
assert crop_size[0] > 0 and crop_size[1] > 0
assert isinstance(crop_size[0], int) and isinstance(
crop_size[1], int)
if crop_type == 'absolute_range':
assert crop_size[0] <= crop_size[1]
else:
assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1
self.crop_size = crop_size
self.crop_type = crop_type
self.allow_negative_crop = allow_negative_crop
self.bbox_clip_border = bbox_clip_border
self.recompute_bbox = recompute_bbox
def _crop_data(self, results: dict, crop_size: Tuple[int, int],
allow_negative_crop: bool) -> Union[dict, None]:
"""Function to randomly crop images, bounding boxes, masks, semantic
segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
crop_size (Tuple[int, int]): Expected absolute size after
cropping, (h, w).
allow_negative_crop (bool): Whether to allow a crop that does not
contain any bbox area.
Returns:
results (Union[dict, None]): Randomly cropped results, 'img_shape'
key in result dict is updated according to crop size. None will
be returned when there is no valid bbox after cropping.
"""
assert crop_size[0] > 0 and crop_size[1] > 0
img = results['img']
margin_h = max(img.shape[0] - crop_size[0], 0)
margin_w = max(img.shape[1] - crop_size[1], 0)
offset_h, offset_w = self._rand_offset((margin_h, margin_w))
crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
# Record the homography matrix for the RandomCrop
homography_matrix = np.array(
[[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]],
dtype=np.float32)
if results.get('homography_matrix', None) is None:
results['homography_matrix'] = homography_matrix
else:
results['homography_matrix'] = homography_matrix @ results[
'homography_matrix']
# crop the image
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
img_shape = img.shape
results['img'] = img
results['img_shape'] = img_shape[:2]
# crop bboxes accordingly and clip to the image boundary
if results.get('gt_bboxes', None) is not None:
bboxes = results['gt_bboxes']
bboxes.translate_([-offset_w, -offset_h])
if self.bbox_clip_border:
bboxes.clip_(img_shape[:2])
valid_inds = bboxes.is_inside(img_shape[:2]).numpy()
# If the crop does not contain any gt-bbox area and
# allow_negative_crop is False, skip this image.
if (not valid_inds.any() and not allow_negative_crop):
return None
results['gt_bboxes'] = bboxes[valid_inds]
if results.get('gt_ignore_flags', None) is not None:
results['gt_ignore_flags'] = \
results['gt_ignore_flags'][valid_inds]
if results.get('gt_bboxes_labels', None) is not None:
results['gt_bboxes_labels'] = \
results['gt_bboxes_labels'][valid_inds]
if results.get('gt_masks', None) is not None:
results['gt_masks'] = results['gt_masks'][
valid_inds.nonzero()[0]].crop(
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
if self.recompute_bbox:
results['gt_bboxes'] = results['gt_masks'].get_bboxes(
type(results['gt_bboxes']))
# crop semantic seg
if results.get('gt_seg_map', None) is not None:
results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2,
crop_x1:crop_x2]
return results
@cache_randomness
def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]:
"""Randomly generate crop offset.
Args:
margin (Tuple[int, int]): The upper bound for the offset generated
randomly.
Returns:
Tuple[int, int]: The random offset for the crop.
"""
margin_h, margin_w = margin
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
return offset_h, offset_w
@cache_randomness
def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]:
"""Randomly generates the absolute crop size based on `crop_type` and
`image_size`.
Args:
image_size (Tuple[int, int]): (h, w).
Returns:
crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels.
"""
h, w = image_size
if self.crop_type == 'absolute':
return min(self.crop_size[1], h), min(self.crop_size[0], w)
elif self.crop_type == 'absolute_range':
crop_h = np.random.randint(
min(h, self.crop_size[0]),
min(h, self.crop_size[1]) + 1)
crop_w = np.random.randint(
min(w, self.crop_size[0]),
min(w, self.crop_size[1]) + 1)
return crop_h, crop_w
elif self.crop_type == 'relative':
crop_w, crop_h = self.crop_size
return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
else:
# 'relative_range'
crop_size = np.asarray(self.crop_size, dtype=np.float32)
crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
@autocast_box_type()
def transform(self, results: dict) -> Union[dict, None]:
"""Transform function to randomly crop images, bounding boxes, masks,
semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
results (Union[dict, None]): Randomly cropped results, 'img_shape'
key in result dict is updated according to crop size. None will
be returned when there is no valid bbox after cropping.
"""
image_size = results['img'].shape[:2]
crop_size = self._get_crop_size(image_size)
results = self._crop_data(results, crop_size, self.allow_negative_crop)
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(crop_size={self.crop_size}, '
repr_str += f'crop_type={self.crop_type}, '
repr_str += f'allow_negative_crop={self.allow_negative_crop}, '
repr_str += f'recompute_bbox={self.recompute_bbox}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
@TRANSFORMS.register_module()
class SegRescale(BaseTransform):
"""Rescale semantic segmentation maps.
    This transform rescales the ``gt_seg_map`` according to ``scale_factor``.
Required Keys:
- gt_seg_map
Modified Keys:
- gt_seg_map
Args:
scale_factor (float): The scale factor of the final output. Defaults
to 1.
backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.
            These two backends generate slightly different results. Defaults
to 'cv2'.
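    Example (an illustrative pipeline-config sketch for a setting that loads
    semantic segmentation maps; the factor is an assumption):
    .. code-block:: python
        train_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True, with_seg=True),
            # downsample the loaded gt_seg_map by a factor of 2
            dict(type='SegRescale', scale_factor=1 / 2)
        ]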
"""
def __init__(self, scale_factor: float = 1, backend: str = 'cv2') -> None:
self.scale_factor = scale_factor
self.backend = backend
def transform(self, results: dict) -> dict:
"""Transform function to scale the semantic segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with semantic segmentation map scaled.
"""
if self.scale_factor != 1:
results['gt_seg_map'] = mmcv.imrescale(
results['gt_seg_map'],
self.scale_factor,
interpolation='nearest',
backend=self.backend)
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(scale_factor={self.scale_factor}, '
repr_str += f'backend={self.backend})'
return repr_str
@TRANSFORMS.register_module()
class PhotoMetricDistortion(BaseTransform):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
8. randomly swap channels
Required Keys:
- img (np.uint8)
Modified Keys:
- img (np.float32)
Args:
brightness_delta (int): delta of brightness.
contrast_range (sequence): range of contrast.
saturation_range (sequence): range of saturation.
hue_delta (int): delta of hue.
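    Example (a minimal pipeline-config sketch using the default
    hyper-parameters; the surrounding transforms are assumptions):
    .. code-block:: python
        train_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(
                type='PhotoMetricDistortion',
                brightness_delta=32,
                contrast_range=(0.5, 1.5),
                saturation_range=(0.5, 1.5),
                hue_delta=18)
        ]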
"""
def __init__(self,
brightness_delta: int = 32,
contrast_range: Sequence[Number] = (0.5, 1.5),
saturation_range: Sequence[Number] = (0.5, 1.5),
hue_delta: int = 18) -> None:
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
@cache_randomness
def _random_flags(self) -> Sequence[Number]:
mode = random.randint(2)
brightness_flag = random.randint(2)
contrast_flag = random.randint(2)
saturation_flag = random.randint(2)
hue_flag = random.randint(2)
swap_flag = random.randint(2)
delta_value = random.uniform(-self.brightness_delta,
self.brightness_delta)
alpha_value = random.uniform(self.contrast_lower, self.contrast_upper)
saturation_value = random.uniform(self.saturation_lower,
self.saturation_upper)
hue_value = random.uniform(-self.hue_delta, self.hue_delta)
swap_value = random.permutation(3)
return (mode, brightness_flag, contrast_flag, saturation_flag,
hue_flag, swap_flag, delta_value, alpha_value,
saturation_value, hue_value, swap_value)
def transform(self, results: dict) -> dict:
"""Transform function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
assert 'img' in results, '`img` is not found in results'
img = results['img']
img = img.astype(np.float32)
(mode, brightness_flag, contrast_flag, saturation_flag, hue_flag,
swap_flag, delta_value, alpha_value, saturation_value, hue_value,
swap_value) = self._random_flags()
# random brightness
if brightness_flag:
img += delta_value
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
if mode == 1:
if contrast_flag:
img *= alpha_value
# convert color from BGR to HSV
img = mmcv.bgr2hsv(img)
# random saturation
if saturation_flag:
img[..., 1] *= saturation_value
# For image(type=float32), after convert bgr to hsv by opencv,
# valid saturation value range is [0, 1]
if saturation_value > 1:
img[..., 1] = img[..., 1].clip(0, 1)
# random hue
if hue_flag:
img[..., 0] += hue_value
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = mmcv.hsv2bgr(img)
# random contrast
if mode == 0:
if contrast_flag:
img *= alpha_value
# randomly swap channels
if swap_flag:
img = img[..., swap_value]
results['img'] = img
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(brightness_delta={self.brightness_delta}, '
repr_str += 'contrast_range='
repr_str += f'{(self.contrast_lower, self.contrast_upper)}, '
repr_str += 'saturation_range='
repr_str += f'{(self.saturation_lower, self.saturation_upper)}, '
repr_str += f'hue_delta={self.hue_delta})'
return repr_str
@TRANSFORMS.register_module()
class Expand(BaseTransform):
"""Random expand the image & bboxes & masks & segmentation map.
Randomly place the original image on a canvas of ``ratio`` x original image
size filled with mean values. The ratio is in the range of ratio_range.
Required Keys:
- img
- img_shape
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- img_shape
- gt_bboxes
- gt_masks
- gt_seg_map
Args:
        mean (sequence): mean value of the dataset.
        to_rgb (bool): whether to convert the order of mean to align with RGB.
        ratio_range (sequence): range of expand ratio.
        seg_ignore_label (int): ignore label of the segmentation map.
        prob (float): probability of applying this transformation.
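    Example (a minimal pipeline-config sketch; ``mean`` should match the
    dataset normalization statistics, so the values below are only
    assumptions):
    .. code-block:: python
        train_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(
                type='Expand',
                mean=(123.675, 116.28, 103.53),
                to_rgb=True,
                ratio_range=(1, 4))
        ]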
"""
def __init__(self,
mean: Sequence[Number] = (0, 0, 0),
to_rgb: bool = True,
ratio_range: Sequence[Number] = (1, 4),
seg_ignore_label: int = None,
prob: float = 0.5) -> None:
self.to_rgb = to_rgb
self.ratio_range = ratio_range
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
self.seg_ignore_label = seg_ignore_label
self.prob = prob
@cache_randomness
def _random_prob(self) -> float:
return random.uniform(0, 1)
@cache_randomness
def _random_ratio(self) -> float:
return random.uniform(self.min_ratio, self.max_ratio)
@cache_randomness
def _random_left_top(self, ratio: float, h: int,
w: int) -> Tuple[int, int]:
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
return left, top
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Transform function to expand images, bounding boxes, masks,
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images, bounding boxes, masks, segmentation
map expanded.
"""
if self._random_prob() > self.prob:
return results
assert 'img' in results, '`img` is not found in results'
img = results['img']
h, w, c = img.shape
ratio = self._random_ratio()
# speedup expand when meets large image
if np.all(self.mean == self.mean[0]):
expand_img = np.empty((int(h * ratio), int(w * ratio), c),
img.dtype)
expand_img.fill(self.mean[0])
else:
expand_img = np.full((int(h * ratio), int(w * ratio), c),
self.mean,
dtype=img.dtype)
left, top = self._random_left_top(ratio, h, w)
expand_img[top:top + h, left:left + w] = img
results['img'] = expand_img
results['img_shape'] = expand_img.shape[:2]
# expand bboxes
if results.get('gt_bboxes', None) is not None:
results['gt_bboxes'].translate_([left, top])
# expand masks
if results.get('gt_masks', None) is not None:
results['gt_masks'] = results['gt_masks'].expand(
int(h * ratio), int(w * ratio), top, left)
# expand segmentation map
if results.get('gt_seg_map', None) is not None:
gt_seg = results['gt_seg_map']
expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
self.seg_ignore_label,
dtype=gt_seg.dtype)
expand_gt_seg[top:top + h, left:left + w] = gt_seg
results['gt_seg_map'] = expand_gt_seg
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
repr_str += f'ratio_range={self.ratio_range}, '
repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
repr_str += f'prob={self.prob})'
return repr_str
@TRANSFORMS.register_module()
class MinIoURandomCrop(BaseTransform):
"""Random crop the image & bboxes & masks & segmentation map, the cropped
patches have minimum IoU requirement with original image & bboxes & masks.
& segmentation map, the IoU threshold is randomly selected from min_ious.
Required Keys:
- img
- img_shape
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- gt_ignore_flags (bool) (optional)
- gt_seg_map (np.uint8) (optional)
Modified Keys:
- img
- img_shape
- gt_bboxes
- gt_bboxes_labels
- gt_masks
- gt_ignore_flags
- gt_seg_map
Args:
min_ious (Sequence[float]): minimum IoU threshold for all intersections
with bounding boxes.
min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
where a >= min_crop_size).
bbox_clip_border (bool, optional): Whether clip the objects outside
the border of the image. Defaults to True.
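    Example (a minimal pipeline-config sketch with the default thresholds;
    the surrounding transforms are assumptions):
    .. code-block:: python
        train_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(
                type='MinIoURandomCrop',
                min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
                min_crop_size=0.3)
        ]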
"""
def __init__(self,
min_ious: Sequence[float] = (0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size: float = 0.3,
bbox_clip_border: bool = True) -> None:
self.min_ious = min_ious
self.sample_mode = (1, *min_ious, 0)
self.min_crop_size = min_crop_size
self.bbox_clip_border = bbox_clip_border
@cache_randomness
def _random_mode(self) -> Number:
return random.choice(self.sample_mode)
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Transform function to crop images and bounding boxes with minimum
IoU constraint.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images and bounding boxes cropped, \
'img_shape' key is updated.
"""
assert 'img' in results, '`img` is not found in results'
assert 'gt_bboxes' in results, '`gt_bboxes` is not found in results'
img = results['img']
boxes = results['gt_bboxes']
h, w, c = img.shape
while True:
mode = self._random_mode()
self.mode = mode
if mode == 1:
return results
min_iou = self.mode
for i in range(50):
new_w = random.uniform(self.min_crop_size * w, w)
new_h = random.uniform(self.min_crop_size * h, h)
# h / w in [0.5, 2]
if new_h / new_w < 0.5 or new_h / new_w > 2:
continue
left = random.uniform(w - new_w)
top = random.uniform(h - new_h)
patch = np.array(
(int(left), int(top), int(left + new_w), int(top + new_h)))
# Line or point crop is not allowed
if patch[2] == patch[0] or patch[3] == patch[1]:
continue
overlaps = boxes.overlaps(
HorizontalBoxes(patch.reshape(-1, 4).astype(np.float32)),
boxes).numpy().reshape(-1)
if len(overlaps) > 0 and overlaps.min() < min_iou:
continue
# center of boxes should inside the crop img
# only adjust boxes and instance masks when the gt is not empty
if len(overlaps) > 0:
# adjust boxes
def is_center_of_bboxes_in_patch(boxes, patch):
centers = boxes.centers.numpy()
mask = ((centers[:, 0] > patch[0]) *
(centers[:, 1] > patch[1]) *
(centers[:, 0] < patch[2]) *
(centers[:, 1] < patch[3]))
return mask
mask = is_center_of_bboxes_in_patch(boxes, patch)
if not mask.any():
continue
if results.get('gt_bboxes', None) is not None:
boxes = results['gt_bboxes']
mask = is_center_of_bboxes_in_patch(boxes, patch)
boxes = boxes[mask]
boxes.translate_([-patch[0], -patch[1]])
if self.bbox_clip_border:
boxes.clip_(
[patch[3] - patch[1], patch[2] - patch[0]])
results['gt_bboxes'] = boxes
# ignore_flags
if results.get('gt_ignore_flags', None) is not None:
results['gt_ignore_flags'] = \
results['gt_ignore_flags'][mask]
# labels
if results.get('gt_bboxes_labels', None) is not None:
results['gt_bboxes_labels'] = results[
'gt_bboxes_labels'][mask]
# mask fields
if results.get('gt_masks', None) is not None:
results['gt_masks'] = results['gt_masks'][
mask.nonzero()[0]].crop(patch)
# adjust the img no matter whether the gt is empty before crop
img = img[patch[1]:patch[3], patch[0]:patch[2]]
results['img'] = img
results['img_shape'] = img.shape[:2]
# seg fields
if results.get('gt_seg_map', None) is not None:
results['gt_seg_map'] = results['gt_seg_map'][
patch[1]:patch[3], patch[0]:patch[2]]
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(min_ious={self.min_ious}, '
repr_str += f'min_crop_size={self.min_crop_size}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
@TRANSFORMS.register_module()
class Corrupt(BaseTransform):
"""Corruption augmentation.
Corruption transforms implemented based on
`imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.
Required Keys:
- img (np.uint8)
Modified Keys:
- img (np.uint8)
Args:
corruption (str): Corruption name.
severity (int): The severity of corruption. Defaults to 1.
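    Example (a minimal pipeline-config sketch; requires the optional
    ``imagecorruptions`` package, and the corruption name below is just one
    of the names provided by that package):
    .. code-block:: python
        test_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='Corrupt', corruption='gaussian_noise', severity=3)
        ]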
"""
def __init__(self, corruption: str, severity: int = 1) -> None:
self.corruption = corruption
self.severity = severity
def transform(self, results: dict) -> dict:
"""Call function to corrupt image.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images corrupted.
"""
if corrupt is None:
raise RuntimeError('imagecorruptions is not installed')
results['img'] = corrupt(
results['img'].astype(np.uint8),
corruption_name=self.corruption,
severity=self.severity)
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__
repr_str += f'(corruption={self.corruption}, '
repr_str += f'severity={self.severity})'
return repr_str
@TRANSFORMS.register_module()
@avoid_cache_randomness
class Albu(BaseTransform):
"""Albumentation augmentation.
    Adds custom transformations from the Albumentations library.
    Please visit `https://albumentations.readthedocs.io`
    to get more information.
Required Keys:
- img (np.uint8)
- gt_bboxes (HorizontalBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
Modified Keys:
- img (np.uint8)
- gt_bboxes (HorizontalBoxes[torch.float32]) (optional)
- gt_masks (BitmapMasks | PolygonMasks) (optional)
- img_shape (tuple)
    An example of ``transforms`` is as follows:
.. code-block::
[
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.1, 0.3],
contrast_limit=[0.1, 0.3],
p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
Args:
transforms (list[dict]): A list of albu transformations
bbox_params (dict, optional): Bbox_params for albumentation `Compose`
keymap (dict, optional): Contains
{'input key':'albumentation-style key'}
        skip_img_without_anno (bool): Whether to skip the image if no
            annotations are left after augmentation. Defaults to False.
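    A usage sketch wiring ``Albu`` into a pipeline (the ``bbox_params`` and
    ``keymap`` values mirror a common albumentations setup and are
    assumptions, not the only valid choice):
    .. code-block:: python
        albu_train_transforms = [
            dict(type='RandomBrightnessContrast', p=0.2)
        ]
        train_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(
                type='Albu',
                transforms=albu_train_transforms,
                bbox_params=dict(
                    type='BboxParams',
                    format='pascal_voc',
                    label_fields=['gt_bboxes_labels', 'gt_ignore_flags'],
                    min_visibility=0.0,
                    filter_lost_elements=True),
                keymap=dict(img='image', gt_bboxes='bboxes'),
                skip_img_without_anno=True)
        ]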
"""
def __init__(self,
transforms: List[dict],
bbox_params: Optional[dict] = None,
keymap: Optional[dict] = None,
skip_img_without_anno: bool = False) -> None:
if Compose is None:
raise RuntimeError('albumentations is not installed')
# Args will be modified later, copying it will be safer
transforms = copy.deepcopy(transforms)
if bbox_params is not None:
bbox_params = copy.deepcopy(bbox_params)
if keymap is not None:
keymap = copy.deepcopy(keymap)
self.transforms = transforms
self.filter_lost_elements = False
self.skip_img_without_anno = skip_img_without_anno
# A simple workaround to remove masks without boxes
if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
and 'filter_lost_elements' in bbox_params):
self.filter_lost_elements = True
self.origin_label_fields = bbox_params['label_fields']
bbox_params['label_fields'] = ['idx_mapper']
del bbox_params['filter_lost_elements']
self.bbox_params = (
self.albu_builder(bbox_params) if bbox_params else None)
self.aug = Compose([self.albu_builder(t) for t in self.transforms],
bbox_params=self.bbox_params)
if not keymap:
self.keymap_to_albu = {
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes'
}
else:
self.keymap_to_albu = keymap
self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
def albu_builder(self, cfg: dict) -> albumentations:
"""Import a module from albumentations.
It inherits some of :func:`build_from_cfg` logic.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj: The constructed object.
"""
assert isinstance(cfg, dict) and 'type' in cfg
args = cfg.copy()
obj_type = args.pop('type')
if is_str(obj_type):
if albumentations is None:
raise RuntimeError('albumentations is not installed')
obj_cls = getattr(albumentations, obj_type)
elif inspect.isclass(obj_type):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
if 'transforms' in args:
args['transforms'] = [
self.albu_builder(transform)
for transform in args['transforms']
]
return obj_cls(**args)
@staticmethod
def mapper(d: dict, keymap: dict) -> dict:
"""Dictionary mapper. Renames keys according to keymap provided.
Args:
d (dict): old dict
keymap (dict): {'old_key':'new_key'}
Returns:
dict: new dict.
"""
updated_dict = {}
for k, v in zip(d.keys(), d.values()):
new_k = keymap.get(k, k)
updated_dict[new_k] = d[k]
return updated_dict
@autocast_box_type()
def transform(self, results: dict) -> Union[dict, None]:
"""Transform function of Albu."""
# TODO: gt_seg_map is not currently supported
# dict to albumentations format
results = self.mapper(results, self.keymap_to_albu)
results, ori_masks = self._preprocess_results(results)
results = self.aug(**results)
results = self._postprocess_results(results, ori_masks)
if results is None:
return None
# back to the original format
results = self.mapper(results, self.keymap_back)
results['img_shape'] = results['img'].shape[:2]
return results
def _preprocess_results(self, results: dict) -> tuple:
"""Pre-processing results to facilitate the use of Albu."""
if 'bboxes' in results:
# to list of boxes
if not isinstance(results['bboxes'], HorizontalBoxes):
raise NotImplementedError(
'Albu only supports horizontal boxes now')
bboxes = results['bboxes'].numpy()
results['bboxes'] = [x for x in bboxes]
# add pseudo-field for filtration
if self.filter_lost_elements:
results['idx_mapper'] = np.arange(len(results['bboxes']))
# TODO: Support mask structure in albu
ori_masks = None
if 'masks' in results:
if isinstance(results['masks'], PolygonMasks):
raise NotImplementedError(
'Albu only supports BitMap masks now')
ori_masks = results['masks']
if albumentations.__version__ < '0.5':
results['masks'] = results['masks'].masks
else:
results['masks'] = [mask for mask in results['masks'].masks]
return results, ori_masks
def _postprocess_results(
self,
results: dict,
ori_masks: Optional[Union[BitmapMasks,
PolygonMasks]] = None) -> dict:
"""Post-processing Albu output."""
# albumentations may return np.array or list on different versions
if 'gt_bboxes_labels' in results and isinstance(
results['gt_bboxes_labels'], list):
results['gt_bboxes_labels'] = np.array(
results['gt_bboxes_labels'], dtype=np.int64)
if 'gt_ignore_flags' in results and isinstance(
results['gt_ignore_flags'], list):
results['gt_ignore_flags'] = np.array(
results['gt_ignore_flags'], dtype=bool)
if 'bboxes' in results:
if isinstance(results['bboxes'], list):
results['bboxes'] = np.array(
results['bboxes'], dtype=np.float32)
results['bboxes'] = results['bboxes'].reshape(-1, 4)
results['bboxes'] = HorizontalBoxes(results['bboxes'])
# filter label_fields
if self.filter_lost_elements:
for label in self.origin_label_fields:
results[label] = np.array(
[results[label][i] for i in results['idx_mapper']])
if 'masks' in results:
assert ori_masks is not None
results['masks'] = np.array(
[results['masks'][i] for i in results['idx_mapper']])
results['masks'] = ori_masks.__class__(
results['masks'], ori_masks.height, ori_masks.width)
if (not len(results['idx_mapper'])
and self.skip_img_without_anno):
return None
elif 'masks' in results:
results['masks'] = ori_masks.__class__(results['masks'],
ori_masks.height,
ori_masks.width)
return results
def __repr__(self) -> str:
repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
return repr_str
@TRANSFORMS.register_module()
@avoid_cache_randomness
class RandomCenterCropPad(BaseTransform):
"""Random center crop and random around padding for CornerNet.
    This operation generates a randomly cropped image from the original image
    and pads it simultaneously. Different from :class:`RandomCrop`, the output
    shape may not strictly equal ``crop_size``. We choose a random value
    from ``ratios`` and the output shape could be larger or smaller than
    ``crop_size``. The padding operation is also different from :class:`Pad`;
    here we use around padding instead of right-bottom padding.
The relation between output image (padding image) and original image:
.. code:: text
output image
+----------------------------+
| padded area |
+------|----------------------------|----------+
| | cropped area | |
| | +---------------+ | |
| | | . center | | | original image
| | | range | | |
| | +---------------+ | |
+------|----------------------------|----------+
| padded area |
+----------------------------+
There are 5 main areas in the figure:
- output image: output image of this operation, also called padding
image in following instruction.
- original image: input image of this operation.
- padded area: non-intersect area of output image and original image.
- cropped area: the overlap of output image and original image.
    - center range: a smaller area where the random center is chosen from.
      The center range is computed from ``border`` and the original image's
      shape so that the random center is not too close to the original
      image's border.
Also this operation act differently in train and test mode, the summary
pipeline is listed below.
Train pipeline:
1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image
will be ``random_ratio * crop_size``.
2. Choose a ``random_center`` in center range.
3. Generate padding image with center matches the ``random_center``.
4. Initialize the padding image with pixel value equals to ``mean``.
5. Copy the cropped area to padding image.
6. Refine annotations.
Test pipeline:
1. Compute output shape according to ``test_pad_mode``.
2. Generate padding image with center matches the original image
center.
3. Initialize the padding image with pixel value equals to ``mean``.
4. Copy the ``cropped area`` to padding image.
Required Keys:
- img (np.float32)
- img_shape (tuple)
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_ignore_flags (bool) (optional)
Modified Keys:
- img (np.float32)
- img_shape (tuple)
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_ignore_flags (bool) (optional)
Args:
        crop_size (tuple, optional): expected size after crop, the final size
            will be computed according to ratio. Requires (width, height)
            in train mode, and None in test mode.
        ratios (tuple, optional): randomly select a ratio from the tuple and
            crop the image to (crop_size[0] * ratio) * (crop_size[1] * ratio).
            Only available in train mode. Defaults to (0.9, 1.0, 1.1).
        border (int, optional): max distance from the center-selection area to
            the image border. Only available in train mode. Defaults to 128.
mean (sequence, optional): Mean values of 3 channels.
std (sequence, optional): Std values of 3 channels.
to_rgb (bool, optional): Whether to convert the image from BGR to RGB.
        test_mode (bool): whether to involve random variables in the
            transform. In train mode, crop_size is fixed, and the center
            coords and ratio are randomly selected from predefined lists. In
            test mode, crop_size is the image's original shape, and the
            center coords and ratio are fixed.
Defaults to False.
test_pad_mode (tuple, optional): padding method and padding shape
value, only available in test mode. Default is using
'logical_or' with 127 as padding shape value.
- 'logical_or': final_shape = input_shape | padding_shape_value
- 'size_divisor': final_shape = int(
ceil(input_shape / padding_shape_value) * padding_shape_value)
Defaults to ('logical_or', 127).
test_pad_add_pix (int): Extra padding pixel in test mode.
Defaults to 0.
bbox_clip_border (bool): Whether clip the objects outside
the border of the image. Defaults to True.
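    Example (a train-mode pipeline-config sketch in the CornerNet style;
    ``mean``/``std``/``to_rgb`` must match the follow-up normalization, so
    the values below are assumptions):
    .. code-block:: python
        train_pipeline = [
            dict(type='LoadImageFromFile', to_float32=True),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(
                type='RandomCenterCropPad',
                crop_size=(511, 511),
                ratios=(0.9, 1.0, 1.1),
                border=128,
                mean=[0, 0, 0],
                std=[1, 1, 1],
                to_rgb=True,
                test_mode=False,
                test_pad_mode=None)
        ]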
"""
def __init__(self,
crop_size: Optional[tuple] = None,
ratios: Optional[tuple] = (0.9, 1.0, 1.1),
border: Optional[int] = 128,
mean: Optional[Sequence] = None,
std: Optional[Sequence] = None,
to_rgb: Optional[bool] = None,
test_mode: bool = False,
test_pad_mode: Optional[tuple] = ('logical_or', 127),
test_pad_add_pix: int = 0,
bbox_clip_border: bool = True) -> None:
if test_mode:
assert crop_size is None, 'crop_size must be None in test mode'
assert ratios is None, 'ratios must be None in test mode'
assert border is None, 'border must be None in test mode'
assert isinstance(test_pad_mode, (list, tuple))
assert test_pad_mode[0] in ['logical_or', 'size_divisor']
else:
assert isinstance(crop_size, (list, tuple))
assert crop_size[0] > 0 and crop_size[1] > 0, (
'crop_size must > 0 in train mode')
assert isinstance(ratios, (list, tuple))
assert test_pad_mode is None, (
'test_pad_mode must be None in train mode')
self.crop_size = crop_size
self.ratios = ratios
self.border = border
# We do not set default value to mean, std and to_rgb because these
# hyper-parameters are easy to forget but could affect the performance.
# Please use the same setting as Normalize for performance assurance.
assert mean is not None and std is not None and to_rgb is not None
self.to_rgb = to_rgb
self.input_mean = mean
self.input_std = std
if to_rgb:
self.mean = mean[::-1]
self.std = std[::-1]
else:
self.mean = mean
self.std = std
self.test_mode = test_mode
self.test_pad_mode = test_pad_mode
self.test_pad_add_pix = test_pad_add_pix
self.bbox_clip_border = bbox_clip_border
def _get_border(self, border, size):
"""Get final border for the target size.
        This function generates a ``final_border`` according to the image's
        shape. The area between ``final_border`` and ``size - final_border``
        is the ``center range``. We randomly choose a center from the
        ``center range`` so that the random center is not too close to the
        original image's border. Also, the ``center range`` should be larger
        than 0.
Args:
border (int): The initial border, default is 128.
size (int): The width or height of original image.
Returns:
int: The final border.
"""
k = 2 * border / size
i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
return border // i
def _filter_boxes(self, patch, boxes):
"""Check whether the center of each box is in the patch.
Args:
patch (list[int]): The cropped area, [left, top, right, bottom].
boxes (numpy array, (N x 4)): Ground truth boxes.
Returns:
mask (numpy array, (N,)): Each box is inside or outside the patch.
"""
center = boxes.centers.numpy()
mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
center[:, 0] < patch[2]) * (
center[:, 1] < patch[3])
return mask
def _crop_image_and_paste(self, image, center, size):
"""Crop image with a given center and size, then paste the cropped
image to a blank image with two centers align.
This function is equivalent to generating a blank image with ``size``
as its shape. Then cover it on the original image with two centers (
the center of blank image and the random center of original image)
aligned. The overlap area is paste from the original image and the
outside area is filled with ``mean pixel``.
Args:
image (np array, H x W x C): Original image.
center (list[int]): Target crop center coord.
size (list[int]): Target crop size. [target_h, target_w]
Returns:
cropped_img (np array, target_h x target_w x C): Cropped image.
border (np array, 4): The distance of four border of
``cropped_img`` to the original image area, [top, bottom,
left, right]
patch (list[int]): The cropped area, [left, top, right, bottom].
"""
center_y, center_x = center
target_h, target_w = size
img_h, img_w, img_c = image.shape
x0 = max(0, center_x - target_w // 2)
x1 = min(center_x + target_w // 2, img_w)
y0 = max(0, center_y - target_h // 2)
y1 = min(center_y + target_h // 2, img_h)
patch = np.array((int(x0), int(y0), int(x1), int(y1)))
left, right = center_x - x0, x1 - center_x
top, bottom = center_y - y0, y1 - center_y
cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
for i in range(img_c):
cropped_img[:, :, i] += self.mean[i]
y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
x_slice = slice(cropped_center_x - left, cropped_center_x + right)
cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
border = np.array([
cropped_center_y - top, cropped_center_y + bottom,
cropped_center_x - left, cropped_center_x + right
],
dtype=np.float32)
return cropped_img, border, patch
def _train_aug(self, results):
"""Random crop and around padding the original image.
Args:
            results (dict): Image information in the augment pipeline.
Returns:
results (dict): The updated dict.
"""
img = results['img']
h, w, c = img.shape
gt_bboxes = results['gt_bboxes']
while True:
scale = random.choice(self.ratios)
new_h = int(self.crop_size[1] * scale)
new_w = int(self.crop_size[0] * scale)
h_border = self._get_border(self.border, h)
w_border = self._get_border(self.border, w)
for i in range(50):
center_x = random.randint(low=w_border, high=w - w_border)
center_y = random.randint(low=h_border, high=h - h_border)
cropped_img, border, patch = self._crop_image_and_paste(
img, [center_y, center_x], [new_h, new_w])
if len(gt_bboxes) == 0:
results['img'] = cropped_img
results['img_shape'] = cropped_img.shape[:2]
return results
                # if the image has no valid bbox, any crop patch is valid.
mask = self._filter_boxes(patch, gt_bboxes)
if not mask.any():
continue
results['img'] = cropped_img
results['img_shape'] = cropped_img.shape[:2]
x0, y0, x1, y1 = patch
left_w, top_h = center_x - x0, center_y - y0
cropped_center_x, cropped_center_y = new_w // 2, new_h // 2
# crop bboxes accordingly and clip to the image boundary
gt_bboxes = gt_bboxes[mask]
gt_bboxes.translate_([
cropped_center_x - left_w - x0,
cropped_center_y - top_h - y0
])
if self.bbox_clip_border:
gt_bboxes.clip_([new_h, new_w])
keep = gt_bboxes.is_inside([new_h, new_w]).numpy()
gt_bboxes = gt_bboxes[keep]
results['gt_bboxes'] = gt_bboxes
# ignore_flags
if results.get('gt_ignore_flags', None) is not None:
gt_ignore_flags = results['gt_ignore_flags'][mask]
results['gt_ignore_flags'] = \
gt_ignore_flags[keep]
# labels
if results.get('gt_bboxes_labels', None) is not None:
gt_labels = results['gt_bboxes_labels'][mask]
results['gt_bboxes_labels'] = gt_labels[keep]
if 'gt_masks' in results or 'gt_seg_map' in results:
raise NotImplementedError(
'RandomCenterCropPad only supports bbox.')
return results
def _test_aug(self, results):
"""Around padding the original image without cropping.
The padding mode and value are from ``test_pad_mode``.
Args:
            results (dict): Image information in the augment pipeline.
Returns:
results (dict): The updated dict.
"""
img = results['img']
h, w, c = img.shape
if self.test_pad_mode[0] in ['logical_or']:
# self.test_pad_add_pix is only used for centernet
target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix
target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix
elif self.test_pad_mode[0] in ['size_divisor']:
divisor = self.test_pad_mode[1]
target_h = int(np.ceil(h / divisor)) * divisor
target_w = int(np.ceil(w / divisor)) * divisor
else:
raise NotImplementedError(
                'RandomCenterCropPad only supports two testing pad modes: '
                'logical_or and size_divisor.')
cropped_img, border, _ = self._crop_image_and_paste(
img, [h // 2, w // 2], [target_h, target_w])
results['img'] = cropped_img
results['img_shape'] = cropped_img.shape[:2]
results['border'] = border
return results
@autocast_box_type()
def transform(self, results: dict) -> dict:
img = results['img']
assert img.dtype == np.float32, (
'RandomCenterCropPad needs the input image of dtype np.float32,'
' please set "to_float32=True" in "LoadImageFromFile" pipeline')
h, w, c = img.shape
assert c == len(self.mean)
if self.test_mode:
return self._test_aug(results)
else:
return self._train_aug(results)
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(crop_size={self.crop_size}, '
repr_str += f'ratios={self.ratios}, '
repr_str += f'border={self.border}, '
repr_str += f'mean={self.input_mean}, '
repr_str += f'std={self.input_std}, '
repr_str += f'to_rgb={self.to_rgb}, '
repr_str += f'test_mode={self.test_mode}, '
repr_str += f'test_pad_mode={self.test_pad_mode}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
@TRANSFORMS.register_module()
class CutOut(BaseTransform):
"""CutOut operation.
Randomly drop some regions of image used in
`Cutout <https://arxiv.org/abs/1708.04552>`_.
Required Keys:
- img
Modified Keys:
- img
Args:
        n_holes (int or tuple[int, int]): Number of regions to be dropped.
            If it is given as a tuple, the number of holes will be randomly
            selected from the closed interval [``n_holes[0]``, ``n_holes[1]``].
cutout_shape (tuple[int, int] or list[tuple[int, int]], optional):
The candidate shape of dropped regions. It can be
``tuple[int, int]`` to use a fixed cutout shape, or
``list[tuple[int, int]]`` to randomly choose shape
from the list. Defaults to None.
cutout_ratio (tuple[float, float] or list[tuple[float, float]],
optional): The candidate ratio of dropped regions. It can be
``tuple[float, float]`` to use a fixed ratio or
``list[tuple[float, float]]`` to randomly choose ratio
from the list. Please note that ``cutout_shape`` and
``cutout_ratio`` cannot be both given at the same time.
Defaults to None.
        fill_in (tuple[float, float, float] or tuple[int, int, int]): The pixel
            value used to fill the dropped regions. Defaults to (0, 0, 0).
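    Example (a minimal pipeline-config sketch; the hole number and candidate
    shapes are illustrative assumptions):
    .. code-block:: python
        train_pipeline = [
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(
                type='CutOut',
                n_holes=(1, 3),
                cutout_shape=[(32, 32), (48, 48)])
        ]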
"""
def __init__(
self,
n_holes: Union[int, Tuple[int, int]],
cutout_shape: Optional[Union[Tuple[int, int],
List[Tuple[int, int]]]] = None,
cutout_ratio: Optional[Union[Tuple[float, float],
List[Tuple[float, float]]]] = None,
fill_in: Union[Tuple[float, float, float], Tuple[int, int,
int]] = (0, 0, 0)
) -> None:
assert (cutout_shape is None) ^ (cutout_ratio is None), \
'Either cutout_shape or cutout_ratio should be specified.'
assert (isinstance(cutout_shape, (list, tuple))
or isinstance(cutout_ratio, (list, tuple)))
if isinstance(n_holes, tuple):
assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
else:
n_holes = (n_holes, n_holes)
self.n_holes = n_holes
self.fill_in = fill_in
self.with_ratio = cutout_ratio is not None
self.candidates = cutout_ratio if self.with_ratio else cutout_shape
if not isinstance(self.candidates, list):
self.candidates = [self.candidates]
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Call function to drop some regions of image."""
h, w, c = results['img'].shape
n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
for _ in range(n_holes):
x1 = np.random.randint(0, w)
y1 = np.random.randint(0, h)
index = np.random.randint(0, len(self.candidates))
if not self.with_ratio:
cutout_w, cutout_h = self.candidates[index]
else:
cutout_w = int(self.candidates[index][0] * w)
cutout_h = int(self.candidates[index][1] * h)
x2 = np.clip(x1 + cutout_w, 0, w)
y2 = np.clip(y1 + cutout_h, 0, h)
results['img'][y1:y2, x1:x2, :] = self.fill_in
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(n_holes={self.n_holes}, '
repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
else f'cutout_shape={self.candidates}, ')
repr_str += f'fill_in={self.fill_in})'
return repr_str
@TRANSFORMS.register_module()
class Mosaic(BaseTransform):
"""Mosaic augmentation.
Given 4 images, mosaic transform combines them into
one output image. The output image is composed of the parts from each sub-
image.
.. code:: text
mosaic transform
center_x
+------------------------------+
| pad | pad |
| +-----------+ |
| | | |
| | image1 |--------+ |
| | | | |
| | | image2 | |
center_y |----+-------------+-----------|
| | cropped | |
|pad | image3 | image4 |
| | | |
+----|-------------+-----------+
| |
+-------------+
    The mosaic transform steps are as follows:
        1. Choose the mosaic center as the intersection of the 4 images.
        2. Get the top-left image according to the index, and randomly
           sample another 3 images from the custom dataset.
        3. A sub-image will be cropped if it is larger than the mosaic patch.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_ignore_flags (bool) (optional)
- mix_results (List[dict])
Modified Keys:
- img
- img_shape
- gt_bboxes (optional)
- gt_bboxes_labels (optional)
- gt_ignore_flags (optional)
Args:
img_scale (Sequence[int]): Image size after mosaic pipeline of single
image. The shape order should be (width, height).
Defaults to (640, 640).
center_ratio_range (Sequence[float]): Center ratio range of mosaic
output. Defaults to (0.5, 1.5).
bbox_clip_border (bool, optional): Whether to clip the objects outside
the border of the image. In some dataset like MOT17, the gt bboxes
are allowed to cross the border of images. Therefore, we don't
need to clip the gt bboxes in these cases. Defaults to True.
pad_val (int): Pad value. Defaults to 114.
prob (float): Probability of applying this transformation.
Defaults to 1.0.
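    Example (a usage sketch: ``mix_results`` is supplied by wrapping the
    dataset with ``MultiImageMixDataset``; the dataset settings below are
    assumptions):
    .. code-block:: python
        train_pipeline = [
            dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0),
            dict(type='PackDetInputs')
        ]
        train_dataset = dict(
            type='MultiImageMixDataset',
            dataset=dict(
                type='CocoDataset',
                pipeline=[
                    dict(type='LoadImageFromFile'),
                    dict(type='LoadAnnotations', with_bbox=True)
                ]),
            pipeline=train_pipeline)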
"""
def __init__(self,
img_scale: Tuple[int, int] = (640, 640),
center_ratio_range: Tuple[float, float] = (0.5, 1.5),
bbox_clip_border: bool = True,
pad_val: float = 114.0,
prob: float = 1.0) -> None:
assert isinstance(img_scale, tuple)
assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \
f'got {prob}.'
log_img_scale(img_scale, skip_square=True, shape_order='wh')
self.img_scale = img_scale
self.center_ratio_range = center_ratio_range
self.bbox_clip_border = bbox_clip_border
self.pad_val = pad_val
self.prob = prob
@cache_randomness
    def get_indexes(self, dataset: BaseDataset) -> List[int]:
"""Call function to collect indexes.
Args:
dataset (:obj:`MultiImageMixDataset`): The dataset.
Returns:
list: indexes.
"""
indexes = [random.randint(0, len(dataset)) for _ in range(3)]
return indexes
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Mosaic transform function.
Args:
results (dict): Result dict.
Returns:
dict: Updated result dict.
"""
if random.uniform(0, 1) > self.prob:
return results
assert 'mix_results' in results
mosaic_bboxes = []
mosaic_bboxes_labels = []
mosaic_ignore_flags = []
if len(results['img'].shape) == 3:
mosaic_img = np.full(
(int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),
self.pad_val,
dtype=results['img'].dtype)
else:
mosaic_img = np.full(
(int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),
self.pad_val,
dtype=results['img'].dtype)
# mosaic center x, y
center_x = int(
random.uniform(*self.center_ratio_range) * self.img_scale[0])
center_y = int(
random.uniform(*self.center_ratio_range) * self.img_scale[1])
center_position = (center_x, center_y)
loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')
for i, loc in enumerate(loc_strs):
if loc == 'top_left':
results_patch = copy.deepcopy(results)
else:
results_patch = copy.deepcopy(results['mix_results'][i - 1])
img_i = results_patch['img']
h_i, w_i = img_i.shape[:2]
# keep_ratio resize
scale_ratio_i = min(self.img_scale[1] / h_i,
self.img_scale[0] / w_i)
img_i = mmcv.imresize(
img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))
# compute the combine parameters
paste_coord, crop_coord = self._mosaic_combine(
loc, center_position, img_i.shape[:2][::-1])
x1_p, y1_p, x2_p, y2_p = paste_coord
x1_c, y1_c, x2_c, y2_c = crop_coord
# crop and paste image
mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]
# adjust coordinate
gt_bboxes_i = results_patch['gt_bboxes']
gt_bboxes_labels_i = results_patch['gt_bboxes_labels']
gt_ignore_flags_i = results_patch['gt_ignore_flags']
padw = x1_p - x1_c
padh = y1_p - y1_c
gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])
gt_bboxes_i.translate_([padw, padh])
mosaic_bboxes.append(gt_bboxes_i)
mosaic_bboxes_labels.append(gt_bboxes_labels_i)
mosaic_ignore_flags.append(gt_ignore_flags_i)
mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)
mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)
mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)
if self.bbox_clip_border:
mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])
# remove outside bboxes
inside_inds = mosaic_bboxes.is_inside(
[2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()
mosaic_bboxes = mosaic_bboxes[inside_inds]
mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]
mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]
results['img'] = mosaic_img
results['img_shape'] = mosaic_img.shape[:2]
results['gt_bboxes'] = mosaic_bboxes
results['gt_bboxes_labels'] = mosaic_bboxes_labels
results['gt_ignore_flags'] = mosaic_ignore_flags
return results
def _mosaic_combine(
self, loc: str, center_position_xy: Sequence[float],
img_shape_wh: Sequence[int]) -> Tuple[Tuple[int], Tuple[int]]:
"""Calculate global coordinate of mosaic image and local coordinate of
cropped sub-image.
Args:
loc (str): Index for the sub-image, loc in ('top_left',
'top_right', 'bottom_left', 'bottom_right').
center_position_xy (Sequence[float]): Mixing center for 4 images,
(x, y).
img_shape_wh (Sequence[int]): Width and height of sub-image
Returns:
tuple[tuple[float]]: Corresponding coordinate of pasting and
cropping
- paste_coord (tuple): paste corner coordinate in mosaic image.
- crop_coord (tuple): crop corner coordinate in mosaic image.
"""
assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right')
if loc == 'top_left':
# index0 to top left part of image
x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \
max(center_position_xy[1] - img_shape_wh[1], 0), \
center_position_xy[0], \
center_position_xy[1]
crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - (
y2 - y1), img_shape_wh[0], img_shape_wh[1]
elif loc == 'top_right':
# index1 to top right part of image
x1, y1, x2, y2 = center_position_xy[0], \
max(center_position_xy[1] - img_shape_wh[1], 0), \
min(center_position_xy[0] + img_shape_wh[0],
self.img_scale[0] * 2), \
center_position_xy[1]
crop_coord = 0, img_shape_wh[1] - (y2 - y1), min(
img_shape_wh[0], x2 - x1), img_shape_wh[1]
elif loc == 'bottom_left':
# index2 to bottom left part of image
x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \
center_position_xy[1], \
center_position_xy[0], \
min(self.img_scale[1] * 2, center_position_xy[1] +
img_shape_wh[1])
crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min(
y2 - y1, img_shape_wh[1])
else:
# index3 to bottom right part of image
x1, y1, x2, y2 = center_position_xy[0], \
center_position_xy[1], \
min(center_position_xy[0] + img_shape_wh[0],
self.img_scale[0] * 2), \
min(self.img_scale[1] * 2, center_position_xy[1] +
img_shape_wh[1])
crop_coord = 0, 0, min(img_shape_wh[0],
x2 - x1), min(y2 - y1, img_shape_wh[1])
paste_coord = x1, y1, x2, y2
return paste_coord, crop_coord
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(img_scale={self.img_scale}, '
repr_str += f'center_ratio_range={self.center_ratio_range}, '
repr_str += f'pad_val={self.pad_val}, '
repr_str += f'prob={self.prob})'
return repr_str
@TRANSFORMS.register_module()
class MixUp(BaseTransform):
"""MixUp data augmentation.
.. code:: text
mixup transform
+------------------------------+
| mixup image | |
| +--------|--------+ |
| | | | |
|---------------+ | |
| | | |
| | image | |
| | | |
| | | |
| |-----------------+ |
| pad |
+------------------------------+
    The mixup transform steps are as follows:
        1. Another random image is picked from the dataset and embedded in
           the top-left patch (after padding and resizing).
        2. The target of the mixup transform is the weighted average of the
           mixup image and the original image.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_ignore_flags (bool) (optional)
- mix_results (List[dict])
Modified Keys:
- img
- img_shape
- gt_bboxes (optional)
- gt_bboxes_labels (optional)
- gt_ignore_flags (optional)
Args:
img_scale (Sequence[int]): Image output size after mixup pipeline.
The shape order should be (width, height). Defaults to (640, 640).
ratio_range (Sequence[float]): Scale ratio of mixup image.
Defaults to (0.5, 1.5).
flip_ratio (float): Horizontal flip ratio of mixup image.
Defaults to 0.5.
pad_val (int): Pad value. Defaults to 114.
max_iters (int): The maximum number of iterations. If the number of
iterations is greater than `max_iters`, but gt_bbox is still
empty, then the iteration is terminated. Defaults to 15.
bbox_clip_border (bool, optional): Whether to clip the objects outside
the border of the image. In some dataset like MOT17, the gt bboxes
are allowed to cross the border of images. Therefore, we don't
need to clip the gt bboxes in these cases. Defaults to True.
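    Example (a usage sketch: like :class:`Mosaic`, ``mix_results`` is
    supplied by ``MultiImageMixDataset``, and the values below are
    assumptions):
    .. code-block:: python
        train_pipeline = [
            dict(type='Mosaic', img_scale=(640, 640), pad_val=114.0),
            dict(
                type='MixUp',
                img_scale=(640, 640),
                ratio_range=(0.8, 1.6),
                pad_val=114.0),
            dict(type='PackDetInputs')
        ]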
"""
def __init__(self,
img_scale: Tuple[int, int] = (640, 640),
ratio_range: Tuple[float, float] = (0.5, 1.5),
flip_ratio: float = 0.5,
pad_val: float = 114.0,
max_iters: int = 15,
bbox_clip_border: bool = True) -> None:
assert isinstance(img_scale, tuple)
log_img_scale(img_scale, skip_square=True, shape_order='wh')
self.dynamic_scale = img_scale
self.ratio_range = ratio_range
self.flip_ratio = flip_ratio
self.pad_val = pad_val
self.max_iters = max_iters
self.bbox_clip_border = bbox_clip_border
@cache_randomness
def get_indexes(self, dataset: BaseDataset) -> int:
"""Call function to collect indexes.
Args:
dataset (:obj:`MultiImageMixDataset`): The dataset.
Returns:
            int: index.
"""
for i in range(self.max_iters):
index = random.randint(0, len(dataset))
gt_bboxes_i = dataset[index]['gt_bboxes']
if len(gt_bboxes_i) != 0:
break
return index
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""MixUp transform function.
Args:
results (dict): Result dict.
Returns:
dict: Updated result dict.
"""
assert 'mix_results' in results
        assert len(
            results['mix_results']) == 1, 'MixUp only supports 2 images now!'
if results['mix_results'][0]['gt_bboxes'].shape[0] == 0:
# empty bbox
return results
retrieve_results = results['mix_results'][0]
retrieve_img = retrieve_results['img']
jit_factor = random.uniform(*self.ratio_range)
is_filp = random.uniform(0, 1) > self.flip_ratio
if len(retrieve_img.shape) == 3:
out_img = np.ones(
(self.dynamic_scale[1], self.dynamic_scale[0], 3),
dtype=retrieve_img.dtype) * self.pad_val
else:
out_img = np.ones(
self.dynamic_scale[::-1],
dtype=retrieve_img.dtype) * self.pad_val
# 1. keep_ratio resize
scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],
self.dynamic_scale[0] / retrieve_img.shape[1])
retrieve_img = mmcv.imresize(
retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),
int(retrieve_img.shape[0] * scale_ratio)))
# 2. paste
out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img
# 3. scale jit
scale_ratio *= jit_factor
out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),
int(out_img.shape[0] * jit_factor)))
# 4. flip
if is_filp:
out_img = out_img[:, ::-1, :]
# 5. random crop
ori_img = results['img']
origin_h, origin_w = out_img.shape[:2]
target_h, target_w = ori_img.shape[:2]
padded_img = np.ones((max(origin_h, target_h), max(
origin_w, target_w), 3)) * self.pad_val
padded_img = padded_img.astype(np.uint8)
padded_img[:origin_h, :origin_w] = out_img
x_offset, y_offset = 0, 0
if padded_img.shape[0] > target_h:
y_offset = random.randint(0, padded_img.shape[0] - target_h)
if padded_img.shape[1] > target_w:
x_offset = random.randint(0, padded_img.shape[1] - target_w)
padded_cropped_img = padded_img[y_offset:y_offset + target_h,
x_offset:x_offset + target_w]
# 6. adjust bbox
retrieve_gt_bboxes = retrieve_results['gt_bboxes']
retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])
if self.bbox_clip_border:
retrieve_gt_bboxes.clip_([origin_h, origin_w])
if is_filp:
retrieve_gt_bboxes.flip_([origin_h, origin_w],
direction='horizontal')
# 7. filter
cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()
cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])
if self.bbox_clip_border:
cp_retrieve_gt_bboxes.clip_([target_h, target_w])
# 8. mix up
ori_img = ori_img.astype(np.float32)
mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)
retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']
retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']
mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(
(results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)
mixup_gt_bboxes_labels = np.concatenate(
(results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)
mixup_gt_ignore_flags = np.concatenate(
(results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)
# remove outside bbox
inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy()
mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]
mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]
mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]
results['img'] = mixup_img.astype(np.uint8)
results['img_shape'] = mixup_img.shape[:2]
results['gt_bboxes'] = mixup_gt_bboxes
results['gt_bboxes_labels'] = mixup_gt_bboxes_labels
results['gt_ignore_flags'] = mixup_gt_ignore_flags
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(dynamic_scale={self.dynamic_scale}, '
repr_str += f'ratio_range={self.ratio_range}, '
repr_str += f'flip_ratio={self.flip_ratio}, '
repr_str += f'pad_val={self.pad_val}, '
repr_str += f'max_iters={self.max_iters}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
@TRANSFORMS.register_module()
class RandomAffine(BaseTransform):
"""Random affine transform data augmentation.
    This operation randomly generates an affine transform matrix including
    rotation, translation, shear and scaling transforms.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_ignore_flags (bool) (optional)
Modified Keys:
- img
- img_shape
- gt_bboxes (optional)
- gt_bboxes_labels (optional)
- gt_ignore_flags (optional)
Args:
max_rotate_degree (float): Maximum degrees of rotation transform.
Defaults to 10.
max_translate_ratio (float): Maximum ratio of translation.
Defaults to 0.1.
scaling_ratio_range (tuple[float]): Min and max ratio of
scaling transform. Defaults to (0.5, 1.5).
max_shear_degree (float): Maximum degrees of shear
transform. Defaults to 2.
border (tuple[int]): Distance from width and height sides of input
image to adjust output shape. Only used in mosaic dataset.
Defaults to (0, 0).
border_val (tuple[int]): Border padding values of 3 channels.
Defaults to (114, 114, 114).
bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some datasets like MOT17, the gt bboxes
are allowed to cross the border of images. Therefore, we don't
need to clip the gt bboxes in these cases. Defaults to True.
"""
def __init__(self,
max_rotate_degree: float = 10.0,
max_translate_ratio: float = 0.1,
scaling_ratio_range: Tuple[float, float] = (0.5, 1.5),
max_shear_degree: float = 2.0,
border: Tuple[int, int] = (0, 0),
border_val: Tuple[int, int, int] = (114, 114, 114),
bbox_clip_border: bool = True) -> None:
assert 0 <= max_translate_ratio <= 1
assert scaling_ratio_range[0] <= scaling_ratio_range[1]
assert scaling_ratio_range[0] > 0
self.max_rotate_degree = max_rotate_degree
self.max_translate_ratio = max_translate_ratio
self.scaling_ratio_range = scaling_ratio_range
self.max_shear_degree = max_shear_degree
self.border = border
self.border_val = border_val
self.bbox_clip_border = bbox_clip_border
@cache_randomness
def _get_random_homography_matrix(self, height, width):
# Rotation
rotation_degree = random.uniform(-self.max_rotate_degree,
self.max_rotate_degree)
rotation_matrix = self._get_rotation_matrix(rotation_degree)
# Scaling
scaling_ratio = random.uniform(self.scaling_ratio_range[0],
self.scaling_ratio_range[1])
scaling_matrix = self._get_scaling_matrix(scaling_ratio)
# Shear
x_degree = random.uniform(-self.max_shear_degree,
self.max_shear_degree)
y_degree = random.uniform(-self.max_shear_degree,
self.max_shear_degree)
shear_matrix = self._get_shear_matrix(x_degree, y_degree)
# Translation
trans_x = random.uniform(-self.max_translate_ratio,
self.max_translate_ratio) * width
trans_y = random.uniform(-self.max_translate_ratio,
self.max_translate_ratio) * height
translate_matrix = self._get_translation_matrix(trans_x, trans_y)
warp_matrix = (
translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix)
return warp_matrix
@autocast_box_type()
def transform(self, results: dict) -> dict:
img = results['img']
height = img.shape[0] + self.border[1] * 2
width = img.shape[1] + self.border[0] * 2
warp_matrix = self._get_random_homography_matrix(height, width)
img = cv2.warpPerspective(
img,
warp_matrix,
dsize=(width, height),
borderValue=self.border_val)
results['img'] = img
results['img_shape'] = img.shape[:2]
bboxes = results['gt_bboxes']
num_bboxes = len(bboxes)
if num_bboxes:
bboxes.project_(warp_matrix)
if self.bbox_clip_border:
bboxes.clip_([height, width])
# remove outside bbox
valid_index = bboxes.is_inside([height, width]).numpy()
results['gt_bboxes'] = bboxes[valid_index]
results['gt_bboxes_labels'] = results['gt_bboxes_labels'][
valid_index]
results['gt_ignore_flags'] = results['gt_ignore_flags'][
valid_index]
if 'gt_masks' in results:
raise NotImplementedError('RandomAffine only supports bbox.')
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(max_rotate_degree={self.max_rotate_degree}, '
repr_str += f'max_translate_ratio={self.max_translate_ratio}, '
repr_str += f'scaling_ratio_range={self.scaling_ratio_range}, '
repr_str += f'max_shear_degree={self.max_shear_degree}, '
repr_str += f'border={self.border}, '
repr_str += f'border_val={self.border_val}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
@staticmethod
def _get_rotation_matrix(rotate_degrees: float) -> np.ndarray:
radian = math.radians(rotate_degrees)
rotation_matrix = np.array(
[[np.cos(radian), -np.sin(radian), 0.],
[np.sin(radian), np.cos(radian), 0.], [0., 0., 1.]],
dtype=np.float32)
return rotation_matrix
@staticmethod
def _get_scaling_matrix(scale_ratio: float) -> np.ndarray:
scaling_matrix = np.array(
[[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]],
dtype=np.float32)
return scaling_matrix
@staticmethod
def _get_shear_matrix(x_shear_degrees: float,
y_shear_degrees: float) -> np.ndarray:
x_radian = math.radians(x_shear_degrees)
y_radian = math.radians(y_shear_degrees)
shear_matrix = np.array([[1, np.tan(x_radian), 0.],
[np.tan(y_radian), 1, 0.], [0., 0., 1.]],
dtype=np.float32)
return shear_matrix
@staticmethod
def _get_translation_matrix(x: float, y: float) -> np.ndarray:
translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]],
dtype=np.float32)
return translation_matrix
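# --- Editor's note: a worked sketch (not part of the original module) of how
# the four elementary matrices above compose into the final homography. The
# angle, scale and offsets are arbitrary toy values; only the composition
# order mirrors ``_get_random_homography_matrix``.
def _example_affine_composition() -> np.ndarray:
    radian = math.radians(10.)
    rotation = np.array([[np.cos(radian), -np.sin(radian), 0.],
                         [np.sin(radian), np.cos(radian), 0.],
                         [0., 0., 1.]], dtype=np.float32)
    scaling = np.diag([1.2, 1.2, 1.]).astype(np.float32)
    shear = np.eye(3, dtype=np.float32)  # zero shear for simplicity
    translation = np.array([[1., 0., 5.],
                            [0., 1., -3.],
                            [0., 0., 1.]], dtype=np.float32)
    # translation @ shear @ rotation @ scaling, as in the method above.
    return translation @ shear @ rotation @ scaling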
@TRANSFORMS.register_module()
class YOLOXHSVRandomAug(BaseTransform):
"""Apply HSV augmentation to image sequentially. It is referenced from
    https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21.
Required Keys:
- img
Modified Keys:
- img
Args:
hue_delta (int): delta of hue. Defaults to 5.
saturation_delta (int): delta of saturation. Defaults to 30.
        value_delta (int): delta of value. Defaults to 30.
"""
def __init__(self,
hue_delta: int = 5,
saturation_delta: int = 30,
value_delta: int = 30) -> None:
self.hue_delta = hue_delta
self.saturation_delta = saturation_delta
self.value_delta = value_delta
@cache_randomness
def _get_hsv_gains(self):
hsv_gains = np.random.uniform(-1, 1, 3) * [
self.hue_delta, self.saturation_delta, self.value_delta
]
# random selection of h, s, v
hsv_gains *= np.random.randint(0, 2, 3)
# prevent overflow
hsv_gains = hsv_gains.astype(np.int16)
return hsv_gains
def transform(self, results: dict) -> dict:
img = results['img']
hsv_gains = self._get_hsv_gains()
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)
img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180
img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255)
img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255)
cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img)
results['img'] = img
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(hue_delta={self.hue_delta}, '
repr_str += f'saturation_delta={self.saturation_delta}, '
repr_str += f'value_delta={self.value_delta})'
return repr_str
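# --- Editor's note: a minimal sketch (not part of the original module) of the
# per-channel HSV update applied in ``transform`` above: hue wraps modulo 180
# (OpenCV's hue range for uint8 images) while saturation and value are clipped
# to [0, 255]. The image and gains here are dummy values; the real transform
# additionally converts BGR <-> HSV with cv2.
def _example_hsv_shift() -> np.ndarray:
    img_hsv = np.random.randint(0, 256, (4, 4, 3)).astype(np.int16)
    gains = np.array([3, -12, 20], dtype=np.int16)  # (dh, ds, dv)
    img_hsv[..., 0] = (img_hsv[..., 0] + gains[0]) % 180
    img_hsv[..., 1] = np.clip(img_hsv[..., 1] + gains[1], 0, 255)
    img_hsv[..., 2] = np.clip(img_hsv[..., 2] + gains[2], 0, 255)
    return img_hsv.astype(np.uint8)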
@TRANSFORMS.register_module()
class CopyPaste(BaseTransform):
"""Simple Copy-Paste is a Strong Data Augmentation Method for Instance
    Segmentation. The simple copy-paste transform steps are as follows:
1. The destination image is already resized with aspect ratio kept,
cropped and padded.
2. Randomly select a source image, which is also already resized
with aspect ratio kept, cropped and padded in a similar way
as the destination image.
3. Randomly select some objects from the source image.
    4. Paste these source objects to the destination image directly,
       since the source and destination images have the same size.
    5. Update object masks of the destination image, since some original
       objects may be occluded.
6. Generate bboxes from the updated destination masks and
filter some objects which are totally occluded, and adjust bboxes
which are partly occluded.
7. Append selected source bboxes, masks, and labels.
Required Keys:
- img
- gt_bboxes (BaseBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_ignore_flags (bool) (optional)
- gt_masks (BitmapMasks) (optional)
Modified Keys:
- img
- gt_bboxes (optional)
- gt_bboxes_labels (optional)
- gt_ignore_flags (optional)
- gt_masks (optional)
Args:
max_num_pasted (int): The maximum number of pasted objects.
Defaults to 100.
bbox_occluded_thr (int): The threshold of occluded bbox.
Defaults to 10.
mask_occluded_thr (int): The threshold of occluded mask.
Defaults to 300.
selected (bool): Whether select objects or not. If select is False,
all objects of the source image will be pasted to the
destination image.
Defaults to True.
"""
def __init__(
self,
max_num_pasted: int = 100,
bbox_occluded_thr: int = 10,
mask_occluded_thr: int = 300,
selected: bool = True,
) -> None:
self.max_num_pasted = max_num_pasted
self.bbox_occluded_thr = bbox_occluded_thr
self.mask_occluded_thr = mask_occluded_thr
self.selected = selected
@cache_randomness
def get_indexes(self, dataset: BaseDataset) -> int:
"""Call function to collect indexes.s.
Args:
dataset (:obj:`MultiImageMixDataset`): The dataset.
Returns:
list: Indexes.
"""
        return random.randint(0, len(dataset) - 1)
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Transform function to make a copy-paste of image.
Args:
results (dict): Result dict.
Returns:
dict: Result dict with copy-paste transformed.
"""
assert 'mix_results' in results
num_images = len(results['mix_results'])
assert num_images == 1, \
f'CopyPaste only supports processing 2 images, got {num_images}'
if self.selected:
selected_results = self._select_object(results['mix_results'][0])
else:
selected_results = results['mix_results'][0]
return self._copy_paste(results, selected_results)
@cache_randomness
def _get_selected_inds(self, num_bboxes: int) -> np.ndarray:
max_num_pasted = min(num_bboxes + 1, self.max_num_pasted)
num_pasted = np.random.randint(0, max_num_pasted)
return np.random.choice(num_bboxes, size=num_pasted, replace=False)
def _select_object(self, results: dict) -> dict:
"""Select some objects from the source results."""
bboxes = results['gt_bboxes']
labels = results['gt_bboxes_labels']
masks = results['gt_masks']
ignore_flags = results['gt_ignore_flags']
selected_inds = self._get_selected_inds(bboxes.shape[0])
selected_bboxes = bboxes[selected_inds]
selected_labels = labels[selected_inds]
selected_masks = masks[selected_inds]
selected_ignore_flags = ignore_flags[selected_inds]
results['gt_bboxes'] = selected_bboxes
results['gt_bboxes_labels'] = selected_labels
results['gt_masks'] = selected_masks
results['gt_ignore_flags'] = selected_ignore_flags
return results
def _copy_paste(self, dst_results: dict, src_results: dict) -> dict:
"""CopyPaste transform function.
Args:
dst_results (dict): Result dict of the destination image.
src_results (dict): Result dict of the source image.
Returns:
dict: Updated result dict.
"""
dst_img = dst_results['img']
dst_bboxes = dst_results['gt_bboxes']
dst_labels = dst_results['gt_bboxes_labels']
dst_masks = dst_results['gt_masks']
dst_ignore_flags = dst_results['gt_ignore_flags']
src_img = src_results['img']
src_bboxes = src_results['gt_bboxes']
src_labels = src_results['gt_bboxes_labels']
src_masks = src_results['gt_masks']
src_ignore_flags = src_results['gt_ignore_flags']
if len(src_bboxes) == 0:
return dst_results
# update masks and generate bboxes from updated masks
composed_mask = np.where(np.any(src_masks.masks, axis=0), 1, 0)
updated_dst_masks = self._get_updated_masks(dst_masks, composed_mask)
updated_dst_bboxes = updated_dst_masks.get_bboxes(type(dst_bboxes))
assert len(updated_dst_bboxes) == len(updated_dst_masks)
# filter totally occluded objects
l1_distance = (updated_dst_bboxes.tensor - dst_bboxes.tensor).abs()
bboxes_inds = (l1_distance <= self.bbox_occluded_thr).all(
dim=-1).numpy()
masks_inds = updated_dst_masks.masks.sum(
axis=(1, 2)) > self.mask_occluded_thr
valid_inds = bboxes_inds | masks_inds
# Paste source objects to destination image directly
img = dst_img * (1 - composed_mask[..., np.newaxis]
) + src_img * composed_mask[..., np.newaxis]
bboxes = src_bboxes.cat([updated_dst_bboxes[valid_inds], src_bboxes])
labels = np.concatenate([dst_labels[valid_inds], src_labels])
masks = np.concatenate(
[updated_dst_masks.masks[valid_inds], src_masks.masks])
ignore_flags = np.concatenate(
[dst_ignore_flags[valid_inds], src_ignore_flags])
dst_results['img'] = img
dst_results['gt_bboxes'] = bboxes
dst_results['gt_bboxes_labels'] = labels
dst_results['gt_masks'] = BitmapMasks(masks, masks.shape[1],
masks.shape[2])
dst_results['gt_ignore_flags'] = ignore_flags
return dst_results
def _get_updated_masks(self, masks: BitmapMasks,
composed_mask: np.ndarray) -> BitmapMasks:
"""Update masks with composed mask."""
assert masks.masks.shape[-2:] == composed_mask.shape[-2:], \
'Cannot compare two arrays of different size'
masks.masks = np.where(composed_mask, 0, masks.masks)
return masks
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(max_num_pasted={self.max_num_pasted}, '
repr_str += f'bbox_occluded_thr={self.bbox_occluded_thr}, '
repr_str += f'mask_occluded_thr={self.mask_occluded_thr}, '
repr_str += f'selected={self.selected})'
return repr_str
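# --- Editor's note: a toy sketch (not part of the original module) of the
# compositing step inside ``_copy_paste``: source pixels replace destination
# pixels wherever the union of the selected source masks is 1. All arrays are
# placeholders.
def _example_copy_paste_composite() -> np.ndarray:
    dst_img = np.zeros((8, 8, 3), dtype=np.float32)
    src_img = np.full((8, 8, 3), 255., dtype=np.float32)
    composed_mask = np.zeros((8, 8), dtype=np.float32)
    composed_mask[2:5, 2:5] = 1.  # union of the selected source instance masks
    return dst_img * (1 - composed_mask[..., None]) + \
        src_img * composed_mask[..., None]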
@TRANSFORMS.register_module()
class RandomErasing(BaseTransform):
"""RandomErasing operation.
Random Erasing randomly selects a rectangle region
in an image and erases its pixels with random values.
`RandomErasing <https://arxiv.org/abs/1708.04896>`_.
Required Keys:
- img
- gt_bboxes (HorizontalBoxes[torch.float32]) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_ignore_flags (bool) (optional)
- gt_masks (BitmapMasks) (optional)
Modified Keys:
- img
- gt_bboxes (optional)
- gt_bboxes_labels (optional)
- gt_ignore_flags (optional)
- gt_masks (optional)
Args:
n_patches (int or tuple[int, int]): Number of regions to be dropped.
If it is given as a tuple, number of patches will be randomly
selected from the closed interval [``n_patches[0]``,
``n_patches[1]``].
ratio (float or tuple[float, float]): The ratio of erased regions.
It can be ``float`` to use a fixed ratio or ``tuple[float, float]``
to randomly choose ratio from the interval.
squared (bool): Whether to erase square region. Defaults to True.
bbox_erased_thr (float): The threshold for the maximum area proportion
of the bbox to be erased. When the proportion of the area where the
bbox is erased is greater than the threshold, the bbox will be
removed. Defaults to 0.9.
img_border_value (int or float or tuple): The filled values for
image border. If float, the same fill value will be used for
all the three channels of image. If tuple, it should be 3 elements.
Defaults to 128.
mask_border_value (int): The fill value used for masks. Defaults to 0.
seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equal ``ignore_label`` in ``semantic_head``
of the corresponding config. Defaults to 255.
"""
def __init__(
self,
n_patches: Union[int, Tuple[int, int]],
ratio: Union[float, Tuple[float, float]],
squared: bool = True,
bbox_erased_thr: float = 0.9,
img_border_value: Union[int, float, tuple] = 128,
mask_border_value: int = 0,
seg_ignore_label: int = 255,
) -> None:
if isinstance(n_patches, tuple):
assert len(n_patches) == 2 and 0 <= n_patches[0] < n_patches[1]
else:
n_patches = (n_patches, n_patches)
if isinstance(ratio, tuple):
assert len(ratio) == 2 and 0 <= ratio[0] < ratio[1] <= 1
else:
ratio = (ratio, ratio)
self.n_patches = n_patches
self.ratio = ratio
self.squared = squared
self.bbox_erased_thr = bbox_erased_thr
self.img_border_value = img_border_value
self.mask_border_value = mask_border_value
self.seg_ignore_label = seg_ignore_label
@cache_randomness
def _get_patches(self, img_shape: Tuple[int, int]) -> List[list]:
"""Get patches for random erasing."""
patches = []
n_patches = np.random.randint(self.n_patches[0], self.n_patches[1] + 1)
for _ in range(n_patches):
if self.squared:
ratio = np.random.random() * (self.ratio[1] -
self.ratio[0]) + self.ratio[0]
ratio = (ratio, ratio)
else:
ratio = (np.random.random() * (self.ratio[1] - self.ratio[0]) +
self.ratio[0], np.random.random() *
(self.ratio[1] - self.ratio[0]) + self.ratio[0])
ph, pw = int(img_shape[0] * ratio[0]), int(img_shape[1] * ratio[1])
px1, py1 = np.random.randint(0,
img_shape[1] - pw), np.random.randint(
0, img_shape[0] - ph)
px2, py2 = px1 + pw, py1 + ph
patches.append([px1, py1, px2, py2])
return np.array(patches)
def _transform_img(self, results: dict, patches: List[list]) -> None:
"""Random erasing the image."""
for patch in patches:
px1, py1, px2, py2 = patch
results['img'][py1:py2, px1:px2, :] = self.img_border_value
def _transform_bboxes(self, results: dict, patches: List[list]) -> None:
"""Random erasing the bboxes."""
bboxes = results['gt_bboxes']
# TODO: unify the logic by using operators in BaseBoxes.
assert isinstance(bboxes, HorizontalBoxes)
bboxes = bboxes.numpy()
left_top = np.maximum(bboxes[:, None, :2], patches[:, :2])
right_bottom = np.minimum(bboxes[:, None, 2:], patches[:, 2:])
wh = np.maximum(right_bottom - left_top, 0)
inter_areas = wh[:, :, 0] * wh[:, :, 1]
bbox_areas = (bboxes[:, 2] - bboxes[:, 0]) * (
bboxes[:, 3] - bboxes[:, 1])
bboxes_erased_ratio = inter_areas.sum(-1) / (bbox_areas + 1e-7)
valid_inds = bboxes_erased_ratio < self.bbox_erased_thr
results['gt_bboxes'] = HorizontalBoxes(bboxes[valid_inds])
results['gt_bboxes_labels'] = results['gt_bboxes_labels'][valid_inds]
results['gt_ignore_flags'] = results['gt_ignore_flags'][valid_inds]
if results.get('gt_masks', None) is not None:
results['gt_masks'] = results['gt_masks'][valid_inds]
def _transform_masks(self, results: dict, patches: List[list]) -> None:
"""Random erasing the masks."""
for patch in patches:
px1, py1, px2, py2 = patch
results['gt_masks'].masks[:, py1:py2,
px1:px2] = self.mask_border_value
def _transform_seg(self, results: dict, patches: List[list]) -> None:
"""Random erasing the segmentation map."""
for patch in patches:
px1, py1, px2, py2 = patch
results['gt_seg_map'][py1:py2, px1:px2] = self.seg_ignore_label
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Transform function to erase some regions of image."""
patches = self._get_patches(results['img_shape'])
self._transform_img(results, patches)
if results.get('gt_bboxes', None) is not None:
self._transform_bboxes(results, patches)
if results.get('gt_masks', None) is not None:
self._transform_masks(results, patches)
if results.get('gt_seg_map', None) is not None:
self._transform_seg(results, patches)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(n_patches={self.n_patches}, '
repr_str += f'ratio={self.ratio}, '
repr_str += f'squared={self.squared}, '
repr_str += f'bbox_erased_thr={self.bbox_erased_thr}, '
repr_str += f'img_border_value={self.img_border_value}, '
repr_str += f'mask_border_value={self.mask_border_value}, '
repr_str += f'seg_ignore_label={self.seg_ignore_label})'
return repr_str
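# --- Editor's note: a worked sketch (not part of the original module) of the
# filtering rule in ``_transform_bboxes``: a box is removed once the erased
# patches cover more than ``bbox_erased_thr`` of its area. The single box and
# patch below are toy values; the resulting ratio is 0.9, so this box would be
# dropped at the default threshold of 0.9.
def _example_bbox_erased_ratio() -> np.ndarray:
    bboxes = np.array([[0., 0., 10., 10.]])   # one box in (x1, y1, x2, y2)
    patches = np.array([[0., 0., 9., 10.]])   # one erased region
    left_top = np.maximum(bboxes[:, None, :2], patches[:, :2])
    right_bottom = np.minimum(bboxes[:, None, 2:], patches[:, 2:])
    wh = np.maximum(right_bottom - left_top, 0)
    inter_areas = (wh[:, :, 0] * wh[:, :, 1]).sum(-1)
    bbox_areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
    return inter_areas / (bbox_areas + 1e-7)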
@TRANSFORMS.register_module()
class CachedMosaic(Mosaic):
"""Cached mosaic augmentation.
    Cached mosaic transform will randomly select images from the cache
and combine them into one output image.
.. code:: text
mosaic transform
center_x
+------------------------------+
| pad | pad |
| +-----------+ |
| | | |
| | image1 |--------+ |
| | | | |
| | | image2 | |
center_y |----+-------------+-----------|
| | cropped | |
|pad | image3 | image4 |
| | | |
+----|-------------+-----------+
| |
+-------------+
The cached mosaic transform steps are as follows:
1. Append the results from the last transform into the cache.
        2. Choose the mosaic center as the intersection of the 4 images.
3. Get the left top image according to the index, and randomly
sample another 3 images from the result cache.
        4. Sub image will be cropped if it is larger than the mosaic patch.
Required Keys:
- img
- gt_bboxes (np.float32) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_ignore_flags (bool) (optional)
Modified Keys:
- img
- img_shape
- gt_bboxes (optional)
- gt_bboxes_labels (optional)
- gt_ignore_flags (optional)
Args:
img_scale (Sequence[int]): Image size after mosaic pipeline of single
image. The shape order should be (width, height).
Defaults to (640, 640).
center_ratio_range (Sequence[float]): Center ratio range of mosaic
output. Defaults to (0.5, 1.5).
bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some datasets like MOT17, the gt bboxes
are allowed to cross the border of images. Therefore, we don't
need to clip the gt bboxes in these cases. Defaults to True.
pad_val (int): Pad value. Defaults to 114.
prob (float): Probability of applying this transformation.
Defaults to 1.0.
max_cached_images (int): The maximum length of the cache. The larger
the cache, the stronger the randomness of this transform. As a
rule of thumb, providing 10 caches for each image suffices for
randomness. Defaults to 40.
random_pop (bool): Whether to randomly pop a result from the cache
when the cache is full. If set to False, use FIFO popping method.
Defaults to True.
"""
def __init__(self,
*args,
max_cached_images: int = 40,
random_pop: bool = True,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.results_cache = []
self.random_pop = random_pop
assert max_cached_images >= 4, 'The length of cache must >= 4, ' \
f'but got {max_cached_images}.'
self.max_cached_images = max_cached_images
@cache_randomness
def get_indexes(self, cache: list) -> list:
"""Call function to collect indexes.
Args:
cache (list): The results cache.
Returns:
list: indexes.
"""
indexes = [random.randint(0, len(cache) - 1) for _ in range(3)]
return indexes
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""Mosaic transform function.
Args:
results (dict): Result dict.
Returns:
dict: Updated result dict.
"""
# cache and pop images
self.results_cache.append(copy.deepcopy(results))
if len(self.results_cache) > self.max_cached_images:
if self.random_pop:
index = random.randint(0, len(self.results_cache) - 1)
else:
index = 0
self.results_cache.pop(index)
if len(self.results_cache) <= 4:
return results
if random.uniform(0, 1) > self.prob:
return results
indices = self.get_indexes(self.results_cache)
mix_results = [copy.deepcopy(self.results_cache[i]) for i in indices]
# TODO: refactor mosaic to reuse these code.
mosaic_bboxes = []
mosaic_bboxes_labels = []
mosaic_ignore_flags = []
mosaic_masks = []
with_mask = True if 'gt_masks' in results else False
if len(results['img'].shape) == 3:
mosaic_img = np.full(
(int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),
self.pad_val,
dtype=results['img'].dtype)
else:
mosaic_img = np.full(
(int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),
self.pad_val,
dtype=results['img'].dtype)
# mosaic center x, y
center_x = int(
random.uniform(*self.center_ratio_range) * self.img_scale[0])
center_y = int(
random.uniform(*self.center_ratio_range) * self.img_scale[1])
center_position = (center_x, center_y)
loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')
for i, loc in enumerate(loc_strs):
if loc == 'top_left':
results_patch = copy.deepcopy(results)
else:
results_patch = copy.deepcopy(mix_results[i - 1])
img_i = results_patch['img']
h_i, w_i = img_i.shape[:2]
# keep_ratio resize
scale_ratio_i = min(self.img_scale[1] / h_i,
self.img_scale[0] / w_i)
img_i = mmcv.imresize(
img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))
# compute the combine parameters
paste_coord, crop_coord = self._mosaic_combine(
loc, center_position, img_i.shape[:2][::-1])
x1_p, y1_p, x2_p, y2_p = paste_coord
x1_c, y1_c, x2_c, y2_c = crop_coord
# crop and paste image
mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]
# adjust coordinate
gt_bboxes_i = results_patch['gt_bboxes']
gt_bboxes_labels_i = results_patch['gt_bboxes_labels']
gt_ignore_flags_i = results_patch['gt_ignore_flags']
padw = x1_p - x1_c
padh = y1_p - y1_c
gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])
gt_bboxes_i.translate_([padw, padh])
mosaic_bboxes.append(gt_bboxes_i)
mosaic_bboxes_labels.append(gt_bboxes_labels_i)
mosaic_ignore_flags.append(gt_ignore_flags_i)
if with_mask and results_patch.get('gt_masks', None) is not None:
gt_masks_i = results_patch['gt_masks']
gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i))
gt_masks_i = gt_masks_i.translate(
out_shape=(int(self.img_scale[0] * 2),
int(self.img_scale[1] * 2)),
offset=padw,
direction='horizontal')
gt_masks_i = gt_masks_i.translate(
out_shape=(int(self.img_scale[0] * 2),
int(self.img_scale[1] * 2)),
offset=padh,
direction='vertical')
mosaic_masks.append(gt_masks_i)
mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)
mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)
mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)
if self.bbox_clip_border:
mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])
# remove outside bboxes
inside_inds = mosaic_bboxes.is_inside(
[2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()
mosaic_bboxes = mosaic_bboxes[inside_inds]
mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]
mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]
results['img'] = mosaic_img
results['img_shape'] = mosaic_img.shape[:2]
results['gt_bboxes'] = mosaic_bboxes
results['gt_bboxes_labels'] = mosaic_bboxes_labels
results['gt_ignore_flags'] = mosaic_ignore_flags
if with_mask:
mosaic_masks = mosaic_masks[0].cat(mosaic_masks)
results['gt_masks'] = mosaic_masks[inside_inds]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(img_scale={self.img_scale}, '
repr_str += f'center_ratio_range={self.center_ratio_range}, '
repr_str += f'pad_val={self.pad_val}, '
repr_str += f'prob={self.prob}, '
repr_str += f'max_cached_images={self.max_cached_images}, '
repr_str += f'random_pop={self.random_pop})'
return repr_str
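# --- Editor's note: a small sketch (not part of the original module) of the
# canvas geometry used by ``transform`` above: the mosaic canvas is twice
# ``img_scale`` in each dimension and the mosaic centre is sampled from
# ``center_ratio_range``. Default argument values are illustrative only.
def _example_mosaic_center(img_scale=(640, 640),
                           center_ratio_range=(0.5, 1.5)):
    canvas_wh = (img_scale[0] * 2, img_scale[1] * 2)
    center_x = int(random.uniform(*center_ratio_range) * img_scale[0])
    center_y = int(random.uniform(*center_ratio_range) * img_scale[1])
    return canvas_wh, (center_x, center_y)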
@TRANSFORMS.register_module()
class CachedMixUp(BaseTransform):
"""Cached mixup data augmentation.
.. code:: text
mixup transform
+------------------------------+
| mixup image | |
| +--------|--------+ |
| | | | |
|---------------+ | |
| | | |
| | image | |
| | | |
| | | |
| |-----------------+ |
| pad |
+------------------------------+
The cached mixup transform steps are as follows:
1. Append the results from the last transform into the cache.
2. Another random image is picked from the cache and embedded in
           the top left patch (after padding and resizing).
3. The target of mixup transform is the weighted average of mixup
image and origin image.
Required Keys:
- img
- gt_bboxes (np.float32) (optional)
- gt_bboxes_labels (np.int64) (optional)
- gt_ignore_flags (bool) (optional)
- mix_results (List[dict])
Modified Keys:
- img
- img_shape
- gt_bboxes (optional)
- gt_bboxes_labels (optional)
- gt_ignore_flags (optional)
Args:
img_scale (Sequence[int]): Image output size after mixup pipeline.
The shape order should be (width, height). Defaults to (640, 640).
ratio_range (Sequence[float]): Scale ratio of mixup image.
Defaults to (0.5, 1.5).
flip_ratio (float): Horizontal flip ratio of mixup image.
Defaults to 0.5.
pad_val (int): Pad value. Defaults to 114.
max_iters (int): The maximum number of iterations. If the number of
iterations is greater than `max_iters`, but gt_bbox is still
empty, then the iteration is terminated. Defaults to 15.
bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some datasets like MOT17, the gt bboxes
are allowed to cross the border of images. Therefore, we don't
need to clip the gt bboxes in these cases. Defaults to True.
max_cached_images (int): The maximum length of the cache. The larger
the cache, the stronger the randomness of this transform. As a
rule of thumb, providing 10 caches for each image suffices for
randomness. Defaults to 20.
random_pop (bool): Whether to randomly pop a result from the cache
when the cache is full. If set to False, use FIFO popping method.
Defaults to True.
prob (float): Probability of applying this transformation.
Defaults to 1.0.
"""
def __init__(self,
img_scale: Tuple[int, int] = (640, 640),
ratio_range: Tuple[float, float] = (0.5, 1.5),
flip_ratio: float = 0.5,
pad_val: float = 114.0,
max_iters: int = 15,
bbox_clip_border: bool = True,
max_cached_images: int = 20,
random_pop: bool = True,
prob: float = 1.0) -> None:
assert isinstance(img_scale, tuple)
assert max_cached_images >= 2, 'The length of cache must >= 2, ' \
f'but got {max_cached_images}.'
assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \
f'got {prob}.'
self.dynamic_scale = img_scale
self.ratio_range = ratio_range
self.flip_ratio = flip_ratio
self.pad_val = pad_val
self.max_iters = max_iters
self.bbox_clip_border = bbox_clip_border
self.results_cache = []
self.max_cached_images = max_cached_images
self.random_pop = random_pop
self.prob = prob
@cache_randomness
def get_indexes(self, cache: list) -> int:
"""Call function to collect indexes.
Args:
cache (list): The result cache.
Returns:
int: index.
"""
for i in range(self.max_iters):
index = random.randint(0, len(cache) - 1)
gt_bboxes_i = cache[index]['gt_bboxes']
if len(gt_bboxes_i) != 0:
break
return index
@autocast_box_type()
def transform(self, results: dict) -> dict:
"""MixUp transform function.
Args:
results (dict): Result dict.
Returns:
dict: Updated result dict.
"""
# cache and pop images
self.results_cache.append(copy.deepcopy(results))
if len(self.results_cache) > self.max_cached_images:
if self.random_pop:
index = random.randint(0, len(self.results_cache) - 1)
else:
index = 0
self.results_cache.pop(index)
if len(self.results_cache) <= 1:
return results
if random.uniform(0, 1) > self.prob:
return results
index = self.get_indexes(self.results_cache)
retrieve_results = copy.deepcopy(self.results_cache[index])
# TODO: refactor mixup to reuse these code.
if retrieve_results['gt_bboxes'].shape[0] == 0:
# empty bbox
return results
retrieve_img = retrieve_results['img']
with_mask = True if 'gt_masks' in results else False
jit_factor = random.uniform(*self.ratio_range)
        is_flip = random.uniform(0, 1) > self.flip_ratio
if len(retrieve_img.shape) == 3:
out_img = np.ones(
(self.dynamic_scale[1], self.dynamic_scale[0], 3),
dtype=retrieve_img.dtype) * self.pad_val
else:
out_img = np.ones(
self.dynamic_scale[::-1],
dtype=retrieve_img.dtype) * self.pad_val
# 1. keep_ratio resize
scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],
self.dynamic_scale[0] / retrieve_img.shape[1])
retrieve_img = mmcv.imresize(
retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),
int(retrieve_img.shape[0] * scale_ratio)))
# 2. paste
out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img
# 3. scale jit
scale_ratio *= jit_factor
out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),
int(out_img.shape[0] * jit_factor)))
# 4. flip
        if is_flip:
out_img = out_img[:, ::-1, :]
# 5. random crop
ori_img = results['img']
origin_h, origin_w = out_img.shape[:2]
target_h, target_w = ori_img.shape[:2]
padded_img = np.ones((max(origin_h, target_h), max(
origin_w, target_w), 3)) * self.pad_val
padded_img = padded_img.astype(np.uint8)
padded_img[:origin_h, :origin_w] = out_img
x_offset, y_offset = 0, 0
if padded_img.shape[0] > target_h:
y_offset = random.randint(0, padded_img.shape[0] - target_h)
if padded_img.shape[1] > target_w:
x_offset = random.randint(0, padded_img.shape[1] - target_w)
padded_cropped_img = padded_img[y_offset:y_offset + target_h,
x_offset:x_offset + target_w]
# 6. adjust bbox
retrieve_gt_bboxes = retrieve_results['gt_bboxes']
retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])
if with_mask:
retrieve_gt_masks = retrieve_results['gt_masks'].rescale(
scale_ratio)
if self.bbox_clip_border:
retrieve_gt_bboxes.clip_([origin_h, origin_w])
        if is_flip:
retrieve_gt_bboxes.flip_([origin_h, origin_w],
direction='horizontal')
if with_mask:
retrieve_gt_masks = retrieve_gt_masks.flip()
# 7. filter
cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()
cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])
if with_mask:
retrieve_gt_masks = retrieve_gt_masks.translate(
out_shape=(target_h, target_w),
offset=-x_offset,
direction='horizontal')
retrieve_gt_masks = retrieve_gt_masks.translate(
out_shape=(target_h, target_w),
offset=-y_offset,
direction='vertical')
if self.bbox_clip_border:
cp_retrieve_gt_bboxes.clip_([target_h, target_w])
# 8. mix up
ori_img = ori_img.astype(np.float32)
mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)
retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']
retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']
mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(
(results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)
mixup_gt_bboxes_labels = np.concatenate(
(results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)
mixup_gt_ignore_flags = np.concatenate(
(results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)
if with_mask:
mixup_gt_masks = retrieve_gt_masks.cat(
[results['gt_masks'], retrieve_gt_masks])
# remove outside bbox
inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy()
mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]
mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]
mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]
if with_mask:
mixup_gt_masks = mixup_gt_masks[inside_inds]
results['img'] = mixup_img.astype(np.uint8)
results['img_shape'] = mixup_img.shape[:2]
results['gt_bboxes'] = mixup_gt_bboxes
results['gt_bboxes_labels'] = mixup_gt_bboxes_labels
results['gt_ignore_flags'] = mixup_gt_ignore_flags
if with_mask:
results['gt_masks'] = mixup_gt_masks
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(dynamic_scale={self.dynamic_scale}, '
repr_str += f'ratio_range={self.ratio_range}, '
repr_str += f'flip_ratio={self.flip_ratio}, '
repr_str += f'pad_val={self.pad_val}, '
repr_str += f'max_iters={self.max_iters}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border}, '
repr_str += f'max_cached_images={self.max_cached_images}, '
repr_str += f'random_pop={self.random_pop}, '
repr_str += f'prob={self.prob})'
return repr_str
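# --- Editor's note: an illustrative pipeline fragment (not part of the
# original module) chaining the cached transforms defined above, RTMDet-style.
# The scales, cache sizes and the trailing HSV step are assumptions made for
# illustration; consult the repository's config files for the settings that
# were actually used.
def _example_cached_pipeline() -> list:
    return [
        dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0,
             max_cached_images=40, random_pop=True),
        dict(type='CachedMixUp', img_scale=(640, 640), ratio_range=(1.0, 1.0),
             pad_val=114.0, max_cached_images=20),
        dict(type='YOLOXHSVRandomAug'),
    ]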
| 140,750 | 37.699753 | 79 |
py
|
ERD
|
ERD-main/mmdet/visualization/palette.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import mmcv
import numpy as np
from mmengine.utils import is_str
def palette_val(palette: List[tuple]) -> List[tuple]:
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette: Union[List[tuple], str, tuple],
num_classes: int) -> List[Tuple[int]]:
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple): palette inputs.
num_classes (int): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
assert isinstance(num_classes, int)
if isinstance(palette, list):
dataset_palette = palette
elif isinstance(palette, tuple):
dataset_palette = [palette] * num_classes
elif palette == 'random' or palette is None:
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
dataset_palette = [tuple(c) for c in palette]
elif palette == 'coco':
from mmdet.datasets import CocoDataset, CocoPanopticDataset
dataset_palette = CocoDataset.METAINFO['palette']
if len(dataset_palette) < num_classes:
dataset_palette = CocoPanopticDataset.METAINFO['palette']
elif palette == 'citys':
from mmdet.datasets import CityscapesDataset
dataset_palette = CityscapesDataset.METAINFO['palette']
elif palette == 'voc':
from mmdet.datasets import VOCDataset
dataset_palette = VOCDataset.METAINFO['palette']
elif is_str(palette):
dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
assert len(dataset_palette) >= num_classes, \
'The length of palette should not be less than `num_classes`.'
return dataset_palette
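# --- Editor's note: small usage sketch (not part of the original module); the
# expected outputs in the comments follow directly from the branches above.
def _example_get_palette():
    fixed = get_palette((255, 0, 0), 3)  # -> [(255, 0, 0)] * 3
    rand = get_palette('random', 3)      # -> 3 reproducible random RGB tuples
    return fixed, rand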
def _get_adaptive_scales(areas: np.ndarray,
min_area: int = 800,
max_area: int = 30000) -> np.ndarray:
"""Get adaptive scales according to areas.
    The scale range is [0.5, 1.0]. When the area is less than
    ``min_area``, the scale is 0.5; when the area is larger than
    ``max_area``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Defaults to 800.
max_area (int): Upper bound areas for adaptive scales.
Defaults to 30000.
Returns:
        ndarray: The adaptive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
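# --- Editor's note: a worked example (not part of the original module) of the
# formula above, using the default bounds:
# scale = clip(0.5 + (area - 800) / (30000 - 800), 0.5, 1.0).
def _example_adaptive_scales() -> np.ndarray:
    areas = np.array([800., 8100., 30000.])
    # -> array([0.5, 0.75, 1.0]) for the default min_area/max_area.
    return _get_adaptive_scales(areas)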
def jitter_color(color: tuple) -> tuple:
"""Randomly jitter the given color in order to better distinguish instances
with the same class.
Args:
color (tuple): The RGB color tuple. Each value is between [0, 255].
Returns:
tuple: The jittered color tuple.
"""
jitter = np.random.rand(3)
jitter = (jitter / np.linalg.norm(jitter) - 0.5) * 0.5 * 255
color = np.clip(jitter + color, 0, 255).astype(np.uint8)
return tuple(color)
| 3,608 | 32.110092 | 79 |
py
|
ERD
|
ERD-main/mmdet/visualization/local_visualizer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Tuple, Union
import cv2
import mmcv
import numpy as np
import torch
from mmengine.dist import master_only
from mmengine.structures import InstanceData, PixelData
from mmengine.visualization import Visualizer
from ..evaluation import INSTANCE_OFFSET
from ..registry import VISUALIZERS
from ..structures import DetDataSample
from ..structures.mask import BitmapMasks, PolygonMasks, bitmap_to_polygon
from .palette import _get_adaptive_scales, get_palette, jitter_color
@VISUALIZERS.register_module()
class DetLocalVisualizer(Visualizer):
"""MMDetection Local Visualizer.
Args:
name (str): Name of the instance. Defaults to 'visualizer'.
image (np.ndarray, optional): the origin image to draw. The format
should be RGB. Defaults to None.
vis_backends (list, optional): Visual backend config list.
Defaults to None.
save_dir (str, optional): Save file dir for all storage backends.
If it is None, the backend storage will not save any data.
bbox_color (str, tuple(int), optional): Color of bbox lines.
The tuple of color should be in BGR order. Defaults to None.
text_color (str, tuple(int), optional): Color of texts.
The tuple of color should be in BGR order.
Defaults to (200, 200, 200).
mask_color (str, tuple(int), optional): Color of masks.
The tuple of color should be in BGR order.
Defaults to None.
line_width (int, float): The linewidth of lines.
Defaults to 3.
alpha (int, float): The transparency of bboxes or mask.
Defaults to 0.8.
Examples:
>>> import numpy as np
>>> import torch
>>> from mmengine.structures import InstanceData
>>> from mmdet.structures import DetDataSample
>>> from mmdet.visualization import DetLocalVisualizer
>>> det_local_visualizer = DetLocalVisualizer()
>>> image = np.random.randint(0, 256,
... size=(10, 12, 3)).astype('uint8')
>>> gt_instances = InstanceData()
>>> gt_instances.bboxes = torch.Tensor([[1, 2, 2, 5]])
>>> gt_instances.labels = torch.randint(0, 2, (1,))
>>> gt_det_data_sample = DetDataSample()
>>> gt_det_data_sample.gt_instances = gt_instances
>>> det_local_visualizer.add_datasample('image', image,
... gt_det_data_sample)
>>> det_local_visualizer.add_datasample(
... 'image', image, gt_det_data_sample,
... out_file='out_file.jpg')
>>> det_local_visualizer.add_datasample(
... 'image', image, gt_det_data_sample,
... show=True)
>>> pred_instances = InstanceData()
>>> pred_instances.bboxes = torch.Tensor([[2, 4, 4, 8]])
>>> pred_instances.labels = torch.randint(0, 2, (1,))
>>> pred_det_data_sample = DetDataSample()
>>> pred_det_data_sample.pred_instances = pred_instances
>>> det_local_visualizer.add_datasample('image', image,
... gt_det_data_sample,
... pred_det_data_sample)
"""
def __init__(self,
name: str = 'visualizer',
image: Optional[np.ndarray] = None,
vis_backends: Optional[Dict] = None,
save_dir: Optional[str] = None,
bbox_color: Optional[Union[str, Tuple[int]]] = None,
text_color: Optional[Union[str,
Tuple[int]]] = (200, 200, 200),
mask_color: Optional[Union[str, Tuple[int]]] = None,
line_width: Union[int, float] = 3,
alpha: float = 0.8) -> None:
super().__init__(
name=name,
image=image,
vis_backends=vis_backends,
save_dir=save_dir)
self.bbox_color = bbox_color
self.text_color = text_color
self.mask_color = mask_color
self.line_width = line_width
self.alpha = alpha
# Set default value. When calling
# `DetLocalVisualizer().dataset_meta=xxx`,
# it will override the default value.
self.dataset_meta = {}
def _draw_instances(self, image: np.ndarray, instances: ['InstanceData'],
classes: Optional[List[str]],
palette: Optional[List[tuple]]) -> np.ndarray:
"""Draw instances of GT or prediction.
Args:
image (np.ndarray): The image to draw.
instances (:obj:`InstanceData`): Data structure for
instance-level annotations or predictions.
classes (List[str], optional): Category information.
palette (List[tuple], optional): Palette information
corresponding to the category.
Returns:
np.ndarray: the drawn image which channel is RGB.
"""
self.set_image(image)
if 'bboxes' in instances:
bboxes = instances.bboxes
labels = instances.labels
max_label = int(max(labels) if len(labels) > 0 else 0)
text_palette = get_palette(self.text_color, max_label + 1)
text_colors = [text_palette[label] for label in labels]
bbox_color = palette if self.bbox_color is None \
else self.bbox_color
bbox_palette = get_palette(bbox_color, max_label + 1)
colors = [bbox_palette[label] for label in labels]
self.draw_bboxes(
bboxes,
edge_colors=colors,
alpha=self.alpha,
line_widths=self.line_width)
positions = bboxes[:, :2] + self.line_width
areas = (bboxes[:, 3] - bboxes[:, 1]) * (
bboxes[:, 2] - bboxes[:, 0])
scales = _get_adaptive_scales(areas)
for i, (pos, label) in enumerate(zip(positions, labels)):
label_text = classes[
label] if classes is not None else f'class {label}'
if 'scores' in instances:
score = round(float(instances.scores[i]) * 100, 1)
label_text += f': {score}'
self.draw_texts(
label_text,
pos,
colors=text_colors[i],
font_sizes=int(13 * scales[i]),
bboxes=[{
'facecolor': 'black',
'alpha': 0.8,
'pad': 0.7,
'edgecolor': 'none'
}])
if 'masks' in instances:
labels = instances.labels
masks = instances.masks
if isinstance(masks, torch.Tensor):
masks = masks.numpy()
elif isinstance(masks, (PolygonMasks, BitmapMasks)):
masks = masks.to_ndarray()
masks = masks.astype(bool)
max_label = int(max(labels) if len(labels) > 0 else 0)
mask_color = palette if self.mask_color is None \
else self.mask_color
mask_palette = get_palette(mask_color, max_label + 1)
colors = [jitter_color(mask_palette[label]) for label in labels]
text_palette = get_palette(self.text_color, max_label + 1)
text_colors = [text_palette[label] for label in labels]
polygons = []
for i, mask in enumerate(masks):
contours, _ = bitmap_to_polygon(mask)
polygons.extend(contours)
self.draw_polygons(polygons, edge_colors='w', alpha=self.alpha)
self.draw_binary_masks(masks, colors=colors, alphas=self.alpha)
if len(labels) > 0 and \
('bboxes' not in instances or
instances.bboxes.sum() == 0):
# instances.bboxes.sum()==0 represent dummy bboxes.
# A typical example of SOLO does not exist bbox branch.
areas = []
positions = []
for mask in masks:
_, _, stats, centroids = cv2.connectedComponentsWithStats(
mask.astype(np.uint8), connectivity=8)
if stats.shape[0] > 1:
largest_id = np.argmax(stats[1:, -1]) + 1
positions.append(centroids[largest_id])
areas.append(stats[largest_id, -1])
areas = np.stack(areas, axis=0)
scales = _get_adaptive_scales(areas)
for i, (pos, label) in enumerate(zip(positions, labels)):
label_text = classes[
label] if classes is not None else f'class {label}'
if 'scores' in instances:
score = round(float(instances.scores[i]) * 100, 1)
label_text += f': {score}'
self.draw_texts(
label_text,
pos,
colors=text_colors[i],
font_sizes=int(13 * scales[i]),
horizontal_alignments='center',
bboxes=[{
'facecolor': 'black',
'alpha': 0.8,
'pad': 0.7,
'edgecolor': 'none'
}])
return self.get_image()
def _draw_panoptic_seg(self, image: np.ndarray,
panoptic_seg: ['PixelData'],
classes: Optional[List[str]]) -> np.ndarray:
"""Draw panoptic seg of GT or prediction.
Args:
image (np.ndarray): The image to draw.
panoptic_seg (:obj:`PixelData`): Data structure for
pixel-level annotations or predictions.
classes (List[str], optional): Category information.
Returns:
np.ndarray: the drawn image which channel is RGB.
"""
# TODO: Is there a way to bypass?
num_classes = len(classes)
panoptic_seg = panoptic_seg.sem_seg[0]
ids = np.unique(panoptic_seg)[::-1]
legal_indices = ids != num_classes # for VOID label
ids = ids[legal_indices]
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
segms = (panoptic_seg[None] == ids[:, None, None])
max_label = int(max(labels) if len(labels) > 0 else 0)
mask_palette = get_palette(self.mask_color, max_label + 1)
colors = [mask_palette[label] for label in labels]
self.set_image(image)
# draw segm
polygons = []
for i, mask in enumerate(segms):
contours, _ = bitmap_to_polygon(mask)
polygons.extend(contours)
self.draw_polygons(polygons, edge_colors='w', alpha=self.alpha)
self.draw_binary_masks(segms, colors=colors, alphas=self.alpha)
# draw label
areas = []
positions = []
for mask in segms:
_, _, stats, centroids = cv2.connectedComponentsWithStats(
mask.astype(np.uint8), connectivity=8)
max_id = np.argmax(stats[1:, -1]) + 1
positions.append(centroids[max_id])
areas.append(stats[max_id, -1])
areas = np.stack(areas, axis=0)
scales = _get_adaptive_scales(areas)
text_palette = get_palette(self.text_color, max_label + 1)
text_colors = [text_palette[label] for label in labels]
for i, (pos, label) in enumerate(zip(positions, labels)):
label_text = classes[label]
self.draw_texts(
label_text,
pos,
colors=text_colors[i],
font_sizes=int(13 * scales[i]),
bboxes=[{
'facecolor': 'black',
'alpha': 0.8,
'pad': 0.7,
'edgecolor': 'none'
}],
horizontal_alignments='center')
return self.get_image()
@master_only
def add_datasample(
self,
name: str,
image: np.ndarray,
data_sample: Optional['DetDataSample'] = None,
draw_gt: bool = True,
draw_pred: bool = True,
show: bool = False,
wait_time: float = 0,
            # TODO: Supported in mmengine's Visualizer.
out_file: Optional[str] = None,
pred_score_thr: float = 0.3,
step: int = 0) -> None:
"""Draw datasample and save to all backends.
- If GT and prediction are plotted at the same time, they are
displayed in a stitched image where the left image is the
ground truth and the right image is the prediction.
- If ``show`` is True, all storage backends are ignored, and
the images will be displayed in a local window.
- If ``out_file`` is specified, the drawn image will be
          saved to ``out_file``. It is usually used when the display
is not available.
Args:
name (str): The image identifier.
image (np.ndarray): The image to draw.
data_sample (:obj:`DetDataSample`, optional): A data
sample that contain annotations and predictions.
Defaults to None.
            draw_gt (bool): Whether to draw GT DetDataSample. Defaults to True.
draw_pred (bool): Whether to draw Prediction DetDataSample.
Defaults to True.
            show (bool): Whether to display the drawn image. Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
out_file (str): Path to output file. Defaults to None.
pred_score_thr (float): The threshold to visualize the bboxes
and masks. Defaults to 0.3.
step (int): Global step value to record. Defaults to 0.
"""
image = image.clip(0, 255).astype(np.uint8)
classes = self.dataset_meta.get('classes', None)
palette = self.dataset_meta.get('palette', None)
gt_img_data = None
pred_img_data = None
if data_sample is not None:
data_sample = data_sample.cpu()
if draw_gt and data_sample is not None:
gt_img_data = image
if 'gt_instances' in data_sample:
gt_img_data = self._draw_instances(image,
data_sample.gt_instances,
classes, palette)
if 'gt_panoptic_seg' in data_sample:
assert classes is not None, 'class information is ' \
'not provided when ' \
'visualizing panoptic ' \
'segmentation results.'
gt_img_data = self._draw_panoptic_seg(
gt_img_data, data_sample.gt_panoptic_seg, classes)
if draw_pred and data_sample is not None:
pred_img_data = image
if 'pred_instances' in data_sample:
pred_instances = data_sample.pred_instances
pred_instances = pred_instances[
pred_instances.scores > pred_score_thr]
pred_img_data = self._draw_instances(image, pred_instances,
classes, palette)
if 'pred_panoptic_seg' in data_sample:
assert classes is not None, 'class information is ' \
'not provided when ' \
'visualizing panoptic ' \
'segmentation results.'
pred_img_data = self._draw_panoptic_seg(
pred_img_data, data_sample.pred_panoptic_seg.numpy(),
classes)
if gt_img_data is not None and pred_img_data is not None:
drawn_img = np.concatenate((gt_img_data, pred_img_data), axis=1)
elif gt_img_data is not None:
drawn_img = gt_img_data
elif pred_img_data is not None:
drawn_img = pred_img_data
else:
# Display the original image directly if nothing is drawn.
drawn_img = image
# It is convenient for users to obtain the drawn image.
# For example, the user wants to obtain the drawn image and
# save it as a video during video inference.
self.set_image(drawn_img)
if show:
self.show(drawn_img, win_name=name, wait_time=wait_time)
if out_file is not None:
mmcv.imwrite(drawn_img[..., ::-1], out_file)
else:
self.add_image(name, drawn_img, step)
| 17,127 | 41.606965 | 79 |
py
|
ERD
|
ERD-main/mmdet/visualization/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .local_visualizer import DetLocalVisualizer
from .palette import get_palette, jitter_color, palette_val
__all__ = ['palette_val', 'get_palette', 'DetLocalVisualizer', 'jitter_color']
| 237 | 38.666667 | 78 |
py
|
ERD
|
ERD-main/mmdet/engine/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hooks import * # noqa: F401, F403
from .optimizers import * # noqa: F401, F403
from .runner import * # noqa: F401, F403
from .schedulers import * # noqa: F401, F403
| 223 | 36.333333 | 47 |
py
|
ERD
|
ERD-main/mmdet/engine/runner/loops.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.model import is_model_wrapper
from mmengine.runner import ValLoop
from mmdet.registry import LOOPS
@LOOPS.register_module()
class TeacherStudentValLoop(ValLoop):
"""Loop for validation of model teacher and student."""
def run(self):
"""Launch validation for model teacher and student."""
self.runner.call_hook('before_val')
self.runner.call_hook('before_val_epoch')
self.runner.model.eval()
model = self.runner.model
if is_model_wrapper(model):
model = model.module
assert hasattr(model, 'teacher')
assert hasattr(model, 'student')
predict_on = model.semi_test_cfg.get('predict_on', None)
multi_metrics = dict()
for _predict_on in ['teacher', 'student']:
model.semi_test_cfg['predict_on'] = _predict_on
for idx, data_batch in enumerate(self.dataloader):
self.run_iter(idx, data_batch)
# compute metrics
metrics = self.evaluator.evaluate(len(self.dataloader.dataset))
multi_metrics.update(
{'/'.join((_predict_on, k)): v
for k, v in metrics.items()})
model.semi_test_cfg['predict_on'] = predict_on
self.runner.call_hook('after_val_epoch', metrics=multi_metrics)
self.runner.call_hook('after_val')
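# --- Editor's note: usage sketch (not part of the original module). Enabling
# this loop for a teacher-student detector is a one-line change in the config;
# everything else about the validation setup is assumed to be in place.
def _example_val_cfg() -> dict:
    return dict(type='TeacherStudentValLoop')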
| 1,407 | 34.2 | 75 |
py
|
ERD
|
ERD-main/mmdet/engine/runner/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .loops import TeacherStudentValLoop
__all__ = ['TeacherStudentValLoop']
| 126 | 24.4 | 47 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/set_epoch_info_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import Hook
from mmengine.model.wrappers import is_model_wrapper
from mmdet.registry import HOOKS
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
"""Set runner's epoch information to the model."""
def before_train_epoch(self, runner):
epoch = runner.epoch
model = runner.model
if is_model_wrapper(model):
model = model.module
model.set_epoch(epoch)
| 480 | 25.722222 | 54 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/checkloss_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval: int = 50) -> None:
self.interval = interval
def after_train_iter(self,
runner: Runner,
batch_idx: int,
data_batch: Optional[dict] = None,
outputs: Optional[dict] = None) -> None:
"""Regularly check whether the loss is valid every n iterations.
Args:
runner (:obj:`Runner`): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (dict, Optional): Data from dataloader.
Defaults to None.
outputs (dict, Optional): Outputs from model. Defaults to None.
"""
if self.every_n_train_iters(runner, self.interval):
assert torch.isfinite(outputs['loss']), \
                runner.logger.info('loss became infinite or NaN!')
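# --- Editor's note: usage sketch (not part of the original module). The hook
# is registered through ``custom_hooks`` in a config; the interval below is an
# illustrative value.
def _example_custom_hooks() -> list:
    return [dict(type='CheckInvalidLossHook', interval=50)]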
| 1,406 | 31.72093 | 78 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/pipeline_switch_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import Compose
from mmengine.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class PipelineSwitchHook(Hook):
"""Switch data pipeline at switch_epoch.
Args:
switch_epoch (int): switch pipeline at this epoch.
switch_pipeline (list[dict]): the pipeline to switch to.
"""
def __init__(self, switch_epoch, switch_pipeline):
self.switch_epoch = switch_epoch
self.switch_pipeline = switch_pipeline
self._restart_dataloader = False
def before_train_epoch(self, runner):
"""switch pipeline."""
epoch = runner.epoch
train_loader = runner.train_dataloader
if epoch == self.switch_epoch:
runner.logger.info('Switch pipeline now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.pipeline = Compose(self.switch_pipeline)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
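# --- Editor's note: usage sketch (not part of the original module), showing a
# switch to a lighter pipeline late in training (as in RTMDet-style recipes).
# The epoch and the replacement pipeline are placeholders, not values
# prescribed by this repository.
def _example_pipeline_switch_hook() -> dict:
    return dict(
        type='PipelineSwitchHook',
        switch_epoch=280,
        switch_pipeline=[dict(type='RandomFlip', prob=0.5)])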
| 1,608 | 36.418605 | 76 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
def trigger_visualization_hook(cfg, args):
default_hooks = cfg.default_hooks
if 'visualization' in default_hooks:
visualization_hook = default_hooks['visualization']
# Turn on visualization
visualization_hook['draw'] = True
if args.show:
visualization_hook['show'] = True
visualization_hook['wait_time'] = args.wait_time
if args.show_dir:
visualization_hook['test_out_dir'] = args.show_dir
else:
raise RuntimeError(
            'VisualizationHook must be included in default_hooks. '
            'Refer to usage '
'"visualization=dict(type=\'VisualizationHook\')"')
return cfg
| 737 | 35.9 | 66 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/visualization_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from typing import Optional, Sequence
import mmcv
from mmengine.fileio import get
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmengine.utils import mkdir_or_exist
from mmengine.visualization import Visualizer
from mmdet.registry import HOOKS
from mmdet.structures import DetDataSample
@HOOKS.register_module()
class DetVisualizationHook(Hook):
"""Detection Visualization Hook. Used to visualize validation and testing
process prediction results.
In the testing phase:
1. If ``show`` is True, it means that only the prediction results are
visualized without storing data, so ``vis_backends`` needs to
be excluded.
2. If ``test_out_dir`` is specified, it means that the prediction results
need to be saved to ``test_out_dir``. In order to avoid vis_backends
        also storing data, ``vis_backends`` needs to be excluded.
3. ``vis_backends`` takes effect if the user does not specify ``show``
        and ``test_out_dir``. You can set ``vis_backends`` to WandbVisBackend or
TensorboardVisBackend to store the prediction result in Wandb or
Tensorboard.
Args:
draw (bool): whether to draw prediction results. If it is False,
it means that no drawing will be done. Defaults to False.
interval (int): The interval of visualization. Defaults to 50.
score_thr (float): The threshold to visualize the bboxes
and masks. Defaults to 0.3.
show (bool): Whether to display the drawn image. Default to False.
wait_time (float): The interval of show (s). Defaults to 0.
test_out_dir (str, optional): directory where painted images
will be saved in testing process.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
def __init__(self,
draw: bool = False,
interval: int = 50,
score_thr: float = 0.3,
show: bool = False,
wait_time: float = 0.,
test_out_dir: Optional[str] = None,
                 backend_args: Optional[dict] = None):
self._visualizer: Visualizer = Visualizer.get_current_instance()
self.interval = interval
self.score_thr = score_thr
self.show = show
if self.show:
# No need to think about vis backends.
self._visualizer._vis_backends = {}
            warnings.warn('`show` is True, which means that only '
'the prediction results are visualized '
'without storing data, so vis_backends '
'needs to be excluded.')
self.wait_time = wait_time
self.backend_args = backend_args
self.draw = draw
self.test_out_dir = test_out_dir
self._test_index = 0
def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict,
outputs: Sequence[DetDataSample]) -> None:
"""Run after every ``self.interval`` validation iterations.
Args:
runner (:obj:`Runner`): The runner of the validation process.
batch_idx (int): The index of the current batch in the val loop.
data_batch (dict): Data from dataloader.
outputs (Sequence[:obj:`DetDataSample`]]): A batch of data samples
that contain annotations and predictions.
"""
if self.draw is False:
return
# There is no guarantee that the same batch of images
# is visualized for each evaluation.
total_curr_iter = runner.iter + batch_idx
# Visualize only the first data
img_path = outputs[0].img_path
img_bytes = get(img_path, backend_args=self.backend_args)
img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
if total_curr_iter % self.interval == 0:
self._visualizer.add_datasample(
osp.basename(img_path) if self.show else 'val_img',
img,
data_sample=outputs[0],
show=self.show,
wait_time=self.wait_time,
pred_score_thr=self.score_thr,
step=total_curr_iter)
def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict,
outputs: Sequence[DetDataSample]) -> None:
"""Run after every testing iterations.
Args:
runner (:obj:`Runner`): The runner of the testing process.
            batch_idx (int): The index of the current batch in the test loop.
data_batch (dict): Data from dataloader.
outputs (Sequence[:obj:`DetDataSample`]): A batch of data samples
that contain annotations and predictions.
"""
if self.draw is False:
return
if self.test_out_dir is not None:
self.test_out_dir = osp.join(runner.work_dir, runner.timestamp,
self.test_out_dir)
mkdir_or_exist(self.test_out_dir)
for data_sample in outputs:
self._test_index += 1
img_path = data_sample.img_path
img_bytes = get(img_path, backend_args=self.backend_args)
img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
out_file = None
if self.test_out_dir is not None:
out_file = osp.basename(img_path)
out_file = osp.join(self.test_out_dir, out_file)
self._visualizer.add_datasample(
osp.basename(img_path) if self.show else 'test_img',
img,
data_sample=data_sample,
show=self.show,
wait_time=self.wait_time,
pred_score_thr=self.score_thr,
out_file=out_file,
step=self._test_index)
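# --- Hedged config sketch (added for illustration; not part of the original
# file). It enables the hook through ``default_hooks`` with drawing turned
# on; the interval, score threshold and output directory are assumed
# placeholder values.
EXAMPLE_DEFAULT_HOOKS = dict(
    visualization=dict(
        type='DetVisualizationHook',
        draw=True,
        interval=10,
        score_thr=0.3,
        test_out_dir='vis_results'))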
| 6,010 | 39.614865 | 79 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/sync_norm_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmengine.dist import get_dist_info
from mmengine.hooks import Hook
from torch import nn
from mmdet.registry import HOOKS
from mmdet.utils import all_reduce_dict
def get_norm_states(module: nn.Module) -> OrderedDict:
"""Get the state_dict of batch norms in the module."""
async_norm_states = OrderedDict()
for name, child in module.named_modules():
if isinstance(child, nn.modules.batchnorm._NormBase):
for k, v in child.state_dict().items():
async_norm_states['.'.join([name, k])] = v
return async_norm_states
@HOOKS.register_module()
class SyncNormHook(Hook):
"""Synchronize Norm states before validation, currently used in YOLOX."""
def before_val_epoch(self, runner):
"""Synchronizing norm."""
module = runner.model
_, world_size = get_dist_info()
if world_size == 1:
return
norm_states = get_norm_states(module)
if len(norm_states) == 0:
return
# TODO: use `all_reduce_dict` in mmengine
norm_states = all_reduce_dict(norm_states, op='mean')
module.load_state_dict(norm_states, strict=False)
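# --- Hedged usage sketch (added for illustration; not part of the original
# file). Registering the hook via ``custom_hooks`` averages the norm
# statistics across ranks before every validation epoch in multi-GPU
# training; it is a no-op when the world size is 1.
EXAMPLE_CUSTOM_HOOKS = [dict(type='SyncNormHook')]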
| 1,247 | 31.842105 | 77 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/mean_teacher_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
from mmengine.hooks import Hook
from mmengine.model import is_model_wrapper
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class MeanTeacherHook(Hook):
"""Mean Teacher Hook.
Mean Teacher is an efficient semi-supervised learning method in
`Mean Teacher <https://arxiv.org/abs/1703.01780>`_.
This method requires two models with exactly the same structure,
as the student model and the teacher model, respectively.
The student model updates the parameters through gradient descent,
and the teacher model updates the parameters through
exponential moving average of the student model.
Compared with the student model, the teacher model
is smoother and accumulates more knowledge.
Args:
momentum (float): The momentum used for updating teacher's parameter.
Teacher's parameter are updated with the formula:
`teacher = (1-momentum) * teacher + momentum * student`.
Defaults to 0.001.
interval (int): Update teacher's parameter every interval iteration.
Defaults to 1.
        skip_buffers (bool): Whether to skip the model buffers, such as
            batchnorm running stats (running_mean, running_var), so that
            they are not updated by the EMA operation. Defaults to True.
"""
def __init__(self,
momentum: float = 0.001,
interval: int = 1,
skip_buffer=True) -> None:
assert 0 < momentum < 1
self.momentum = momentum
self.interval = interval
self.skip_buffers = skip_buffer
def before_train(self, runner: Runner) -> None:
"""To check that teacher model and student model exist."""
model = runner.model
if is_model_wrapper(model):
model = model.module
assert hasattr(model, 'teacher')
assert hasattr(model, 'student')
# only do it at initial stage
if runner.iter == 0:
self.momentum_update(model, 1)
def after_train_iter(self,
runner: Runner,
batch_idx: int,
data_batch: Optional[dict] = None,
outputs: Optional[dict] = None) -> None:
"""Update teacher's parameter every self.interval iterations."""
if (runner.iter + 1) % self.interval != 0:
return
model = runner.model
if is_model_wrapper(model):
model = model.module
self.momentum_update(model, self.momentum)
def momentum_update(self, model: nn.Module, momentum: float) -> None:
"""Compute the moving average of the parameters using exponential
moving average."""
if self.skip_buffers:
for (src_name, src_parm), (dst_name, dst_parm) in zip(
model.student.named_parameters(),
model.teacher.named_parameters()):
dst_parm.data.mul_(1 - momentum).add_(
src_parm.data, alpha=momentum)
else:
for (src_parm,
dst_parm) in zip(model.student.state_dict().values(),
model.teacher.state_dict().values()):
# exclude num_tracking
if dst_parm.dtype.is_floating_point:
dst_parm.data.mul_(1 - momentum).add_(
src_parm.data, alpha=momentum)
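# --- Hedged usage sketch (added for illustration; not part of the original
# file). The hook is usually registered via ``custom_hooks`` in a
# semi-supervised config; the momentum and interval below are assumed
# placeholder values. For a single scalar, the update above reduces to
# teacher = (1 - momentum) * teacher + momentum * student, e.g. with
# momentum=0.001, teacher=1.0 and student=0.0 the new teacher is 0.999.
EXAMPLE_CUSTOM_HOOKS = [
    dict(type='MeanTeacherHook', momentum=0.001, interval=1)
]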
| 3,537 | 39.204545 | 77 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/yolox_mode_switch_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
from mmengine.hooks import Hook
from mmengine.model import is_model_wrapper
from mmdet.registry import HOOKS
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
"""Switch the mode of YOLOX during training.
This hook turns off the mosaic and mixup data augmentation and switches
to use L1 loss in bbox_head.
Args:
num_last_epochs (int): The number of latter epochs in the end of the
training to close the data augmentation and switch to L1 loss.
Defaults to 15.
        skip_type_keys (Sequence[str], optional): Sequence of transform type
            strings to be skipped in the pipeline.
            Defaults to ('Mosaic', 'RandomAffine', 'MixUp').
"""
def __init__(
self,
num_last_epochs: int = 15,
skip_type_keys: Sequence[str] = ('Mosaic', 'RandomAffine', 'MixUp')
) -> None:
self.num_last_epochs = num_last_epochs
self.skip_type_keys = skip_type_keys
self._restart_dataloader = False
def before_train_epoch(self, runner) -> None:
"""Close mosaic and mixup augmentation and switches to use L1 loss."""
epoch = runner.epoch
train_loader = runner.train_dataloader
model = runner.model
# TODO: refactor after mmengine using model wrapper
if is_model_wrapper(model):
model = model.module
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
runner.logger.info('No mosaic and mixup aug now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
runner.logger.info('Add additional L1 loss now!')
model.bbox_head.use_l1 = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
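# --- Hedged usage sketch (added for illustration; not part of the original
# file). It closes mosaic/mixup and enables the extra L1 loss for the last
# 15 epochs of a YOLOX-style schedule; the values are assumed placeholders.
EXAMPLE_CUSTOM_HOOKS = [
    dict(
        type='YOLOXModeSwitchHook',
        num_last_epochs=15,
        skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp'))
]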
| 2,434 | 39.583333 | 78 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .utils import trigger_visualization_hook
from .visualization_hook import DetVisualizationHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook',
'PipelineSwitchHook'
]
| 799 | 41.105263 | 73 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/num_class_check_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import VGG
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class NumClassCheckHook(Hook):
"""Check whether the `num_classes` in head matches the length of `classes`
in `dataset.metainfo`."""
def _check_head(self, runner: Runner, mode: str) -> None:
"""Check whether the `num_classes` in head matches the length of
`classes` in `dataset.metainfo`.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
assert mode in ['train', 'val']
model = runner.model
dataset = runner.train_dataloader.dataset if mode == 'train' else \
runner.val_dataloader.dataset
if dataset.metainfo.get('classes', None) is None:
runner.logger.warning(
f'Please set `classes` '
                f'in the {dataset.__class__.__name__} `metainfo` and '
f'check if it is consistent with the `num_classes` '
f'of head')
else:
classes = dataset.metainfo['classes']
assert type(classes) is not str, \
                (f'`classes` in {dataset.__class__.__name__} '
                 f'should be a tuple of str. '
f'Add comma if number of classes is 1 as '
f'classes = ({classes},)')
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
for name, module in model.named_modules():
if hasattr(module, 'num_classes') and not name.endswith(
'rpn_head') and not isinstance(
module, (VGG, FusedSemanticHead)):
assert module.num_classes == len(classes), \
(f'The `num_classes` ({module.num_classes}) in '
f'{module.__class__.__name__} of '
                         f'{model.__class__.__name__} does not match '
                         f'the length of `classes` '
                         f'({len(classes)}) in '
f'{dataset.__class__.__name__}')
def before_train_epoch(self, runner: Runner) -> None:
"""Check whether the training dataset is compatible with head.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
self._check_head(runner, 'train')
def before_val_epoch(self, runner: Runner) -> None:
"""Check whether the dataset in val epoch is compatible with head.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
self._check_head(runner, 'val')
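# --- Hedged sketch (added for illustration; not part of the original file).
# The check above compares head ``num_classes`` with ``dataset.metainfo``;
# a single-class dataset must therefore declare ``classes`` as a one-element
# tuple, e.g. as below (the class name is an assumed placeholder).
EXAMPLE_METAINFO = dict(classes=('person', ))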
| 2,813 | 39.782609 | 78 |
py
|
ERD
|
ERD-main/mmdet/engine/hooks/memory_profiler_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
from mmdet.structures import DetDataSample
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval: int = 50) -> None:
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def _record_memory_information(self, runner: Runner) -> None:
"""Regularly record memory information.
Args:
runner (:obj:`Runner`): The runner of the training or evaluation
process.
"""
# in Byte
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
' MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
def after_train_iter(self,
runner: Runner,
batch_idx: int,
data_batch: Optional[dict] = None,
outputs: Optional[dict] = None) -> None:
"""Regularly record memory information.
Args:
runner (:obj:`Runner`): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (dict, optional): Data from dataloader.
Defaults to None.
outputs (dict, optional): Outputs from model. Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self.interval):
self._record_memory_information(runner)
def after_val_iter(
self,
runner: Runner,
batch_idx: int,
data_batch: Optional[dict] = None,
outputs: Optional[Sequence[DetDataSample]] = None) -> None:
"""Regularly record memory information.
Args:
runner (:obj:`Runner`): The runner of the validation process.
batch_idx (int): The index of the current batch in the val loop.
data_batch (dict, optional): Data from dataloader.
Defaults to None.
outputs (Sequence[:obj:`DetDataSample`], optional):
Outputs from model. Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self.interval):
self._record_memory_information(runner)
def after_test_iter(
self,
runner: Runner,
batch_idx: int,
data_batch: Optional[dict] = None,
outputs: Optional[Sequence[DetDataSample]] = None) -> None:
"""Regularly record memory information.
Args:
runner (:obj:`Runner`): The runner of the testing process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (dict, optional): Data from dataloader.
Defaults to None.
outputs (Sequence[:obj:`DetDataSample`], optional):
Outputs from model. Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self.interval):
self._record_memory_information(runner)
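# --- Hedged usage sketch (added for illustration; not part of the original
# file). Registering the hook via ``custom_hooks`` logs virtual, swap and
# process memory every 50 iterations; psutil and memory_profiler must be
# installed, as enforced in ``__init__`` above.
EXAMPLE_CUSTOM_HOOKS = [dict(type='MemoryProfilerHook', interval=50)]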
| 4,650 | 37.122951 | 79 |
py
|
ERD
|
ERD-main/mmdet/engine/optimizers/layer_decay_optimizer_constructor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import json
from typing import List
import torch.nn as nn
from mmengine.dist import get_dist_info
from mmengine.logging import MMLogger
from mmengine.optim import DefaultOptimWrapperConstructor
from mmdet.registry import OPTIM_WRAPPER_CONSTRUCTORS
def get_layer_id_for_convnext(var_name, max_layer_id):
"""Get the layer id to set the different learning rates in ``layer_wise``
decay_type.
Args:
var_name (str): The key of the model.
max_layer_id (int): Maximum layer id.
Returns:
int: The id number corresponding to different learning rate in
``LearningRateDecayOptimizerConstructor``.
"""
if var_name in ('backbone.cls_token', 'backbone.mask_token',
'backbone.pos_embed'):
return 0
elif var_name.startswith('backbone.downsample_layers'):
stage_id = int(var_name.split('.')[2])
if stage_id == 0:
layer_id = 0
elif stage_id == 1:
layer_id = 2
elif stage_id == 2:
layer_id = 3
elif stage_id == 3:
layer_id = max_layer_id
return layer_id
elif var_name.startswith('backbone.stages'):
stage_id = int(var_name.split('.')[2])
block_id = int(var_name.split('.')[3])
if stage_id == 0:
layer_id = 1
elif stage_id == 1:
layer_id = 2
elif stage_id == 2:
layer_id = 3 + block_id // 3
elif stage_id == 3:
layer_id = max_layer_id
return layer_id
else:
return max_layer_id + 1
def get_stage_id_for_convnext(var_name, max_stage_id):
"""Get the stage id to set the different learning rates in ``stage_wise``
decay_type.
Args:
var_name (str): The key of the model.
max_stage_id (int): Maximum stage id.
Returns:
int: The id number corresponding to different learning rate in
``LearningRateDecayOptimizerConstructor``.
"""
if var_name in ('backbone.cls_token', 'backbone.mask_token',
'backbone.pos_embed'):
return 0
elif var_name.startswith('backbone.downsample_layers'):
return 0
elif var_name.startswith('backbone.stages'):
stage_id = int(var_name.split('.')[2])
return stage_id + 1
else:
return max_stage_id - 1
@OPTIM_WRAPPER_CONSTRUCTORS.register_module()
class LearningRateDecayOptimizerConstructor(DefaultOptimWrapperConstructor):
# Different learning rates are set for different layers of backbone.
# Note: Currently, this optimizer constructor is built for ConvNeXt.
def add_params(self, params: List[dict], module: nn.Module,
**kwargs) -> None:
"""Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
"""
logger = MMLogger.get_current_instance()
parameter_groups = {}
logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}')
num_layers = self.paramwise_cfg.get('num_layers') + 2
decay_rate = self.paramwise_cfg.get('decay_rate')
decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise')
logger.info('Build LearningRateDecayOptimizerConstructor '
f'{decay_type} {decay_rate} - {num_layers}')
weight_decay = self.base_wd
for name, param in module.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith('.bias') or name in (
'pos_embed', 'cls_token'):
group_name = 'no_decay'
this_weight_decay = 0.
else:
group_name = 'decay'
this_weight_decay = weight_decay
if 'layer_wise' in decay_type:
if 'ConvNeXt' in module.backbone.__class__.__name__:
layer_id = get_layer_id_for_convnext(
name, self.paramwise_cfg.get('num_layers'))
logger.info(f'set param {name} as id {layer_id}')
else:
raise NotImplementedError()
elif decay_type == 'stage_wise':
if 'ConvNeXt' in module.backbone.__class__.__name__:
layer_id = get_stage_id_for_convnext(name, num_layers)
logger.info(f'set param {name} as id {layer_id}')
else:
raise NotImplementedError()
group_name = f'layer_{layer_id}_{group_name}'
if group_name not in parameter_groups:
scale = decay_rate**(num_layers - layer_id - 1)
parameter_groups[group_name] = {
'weight_decay': this_weight_decay,
'params': [],
'param_names': [],
'lr_scale': scale,
'group_name': group_name,
'lr': scale * self.base_lr,
}
parameter_groups[group_name]['params'].append(param)
parameter_groups[group_name]['param_names'].append(name)
rank, _ = get_dist_info()
if rank == 0:
to_display = {}
for key in parameter_groups:
to_display[key] = {
'param_names': parameter_groups[key]['param_names'],
'lr_scale': parameter_groups[key]['lr_scale'],
'lr': parameter_groups[key]['lr'],
'weight_decay': parameter_groups[key]['weight_decay'],
}
logger.info(f'Param groups = {json.dumps(to_display, indent=2)}')
params.extend(parameter_groups.values())
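# --- Hedged config sketch (added for illustration; not part of the original
# file). A ConvNeXt-style ``optim_wrapper`` using layer-wise decay; the
# learning rate, decay rate and ``num_layers`` are assumed placeholder
# values and should be matched to the backbone actually used.
EXAMPLE_OPTIM_WRAPPER = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=1e-4, weight_decay=0.05),
    constructor='LearningRateDecayOptimizerConstructor',
    paramwise_cfg=dict(
        decay_rate=0.7, decay_type='layer_wise', num_layers=6))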
| 6,020 | 36.867925 | 77 |
py
|
ERD
|
ERD-main/mmdet/engine/optimizers/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .layer_decay_optimizer_constructor import \
LearningRateDecayOptimizerConstructor
__all__ = ['LearningRateDecayOptimizerConstructor']
| 192 | 31.166667 | 51 |
py
|
ERD
|
ERD-main/mmdet/engine/schedulers/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .quadratic_warmup import (QuadraticWarmupLR, QuadraticWarmupMomentum,
QuadraticWarmupParamScheduler)
__all__ = [
'QuadraticWarmupParamScheduler', 'QuadraticWarmupMomentum',
'QuadraticWarmupLR'
]
| 288 | 31.111111 | 74 |
py
|
ERD
|
ERD-main/mmdet/engine/schedulers/quadratic_warmup.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.optim.scheduler.lr_scheduler import LRSchedulerMixin
from mmengine.optim.scheduler.momentum_scheduler import MomentumSchedulerMixin
from mmengine.optim.scheduler.param_scheduler import INF, _ParamScheduler
from torch.optim import Optimizer
from mmdet.registry import PARAM_SCHEDULERS
@PARAM_SCHEDULERS.register_module()
class QuadraticWarmupParamScheduler(_ParamScheduler):
r"""Warm up the parameter value of each parameter group by quadratic
formula:
.. math::
X_{t} = X_{t-1} + \frac{2t+1}{{(end-begin)}^{2}} \times X_{base}
Args:
optimizer (Optimizer): Wrapped optimizer.
param_name (str): Name of the parameter to be adjusted, such as
``lr``, ``momentum``.
begin (int): Step at which to start updating the parameters.
Defaults to 0.
end (int): Step at which to stop updating the parameters.
Defaults to INF.
last_step (int): The index of last step. Used for resume without
state dict. Defaults to -1.
by_epoch (bool): Whether the scheduled parameters are updated by
epochs. Defaults to True.
verbose (bool): Whether to print the value for each update.
Defaults to False.
"""
def __init__(self,
optimizer: Optimizer,
param_name: str,
begin: int = 0,
end: int = INF,
last_step: int = -1,
by_epoch: bool = True,
verbose: bool = False):
if end >= INF:
            raise ValueError('``end`` must be less than infinity, '
'Please set ``end`` parameter of '
'``QuadraticWarmupScheduler`` as the '
'number of warmup end.')
self.total_iters = end - begin
super().__init__(
optimizer=optimizer,
param_name=param_name,
begin=begin,
end=end,
last_step=last_step,
by_epoch=by_epoch,
verbose=verbose)
@classmethod
def build_iter_from_epoch(cls,
*args,
begin=0,
end=INF,
by_epoch=True,
epoch_length=None,
**kwargs):
"""Build an iter-based instance of this scheduler from an epoch-based
config."""
assert by_epoch, 'Only epoch-based kwargs whose `by_epoch=True` can ' \
'be converted to iter-based.'
assert epoch_length is not None and epoch_length > 0, \
f'`epoch_length` must be a positive integer, ' \
f'but got {epoch_length}.'
by_epoch = False
begin = begin * epoch_length
if end != INF:
end = end * epoch_length
return cls(*args, begin=begin, end=end, by_epoch=by_epoch, **kwargs)
def _get_value(self):
"""Compute value using chainable form of the scheduler."""
if self.last_step == 0:
return [
base_value * (2 * self.last_step + 1) / self.total_iters**2
for base_value in self.base_values
]
return [
group[self.param_name] + base_value *
(2 * self.last_step + 1) / self.total_iters**2
for base_value, group in zip(self.base_values,
self.optimizer.param_groups)
]
@PARAM_SCHEDULERS.register_module()
class QuadraticWarmupLR(LRSchedulerMixin, QuadraticWarmupParamScheduler):
"""Warm up the learning rate of each parameter group by quadratic formula.
Args:
optimizer (Optimizer): Wrapped optimizer.
begin (int): Step at which to start updating the parameters.
Defaults to 0.
end (int): Step at which to stop updating the parameters.
Defaults to INF.
last_step (int): The index of last step. Used for resume without
state dict. Defaults to -1.
by_epoch (bool): Whether the scheduled parameters are updated by
epochs. Defaults to True.
verbose (bool): Whether to print the value for each update.
Defaults to False.
"""
@PARAM_SCHEDULERS.register_module()
class QuadraticWarmupMomentum(MomentumSchedulerMixin,
QuadraticWarmupParamScheduler):
"""Warm up the momentum value of each parameter group by quadratic formula.
Args:
optimizer (Optimizer): Wrapped optimizer.
begin (int): Step at which to start updating the parameters.
Defaults to 0.
end (int): Step at which to stop updating the parameters.
Defaults to INF.
last_step (int): The index of last step. Used for resume without
state dict. Defaults to -1.
by_epoch (bool): Whether the scheduled parameters are updated by
epochs. Defaults to True.
verbose (bool): Whether to print the value for each update.
Defaults to False.
"""
| 5,176 | 38.219697 | 79 |
py
|
ERD
|
ERD-main/mmdet/utils/contextmanagers.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
import contextlib
import logging
import os
import time
from typing import List
import torch
logger = logging.getLogger(__name__)
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
@contextlib.asynccontextmanager
async def completed(trace_name='',
name='',
sleep_interval=0.05,
streams: List[torch.cuda.Stream] = None):
"""Async context manager that waits for work to complete on given CUDA
streams."""
if not torch.cuda.is_available():
yield
return
stream_before_context_switch = torch.cuda.current_stream()
if not streams:
streams = [stream_before_context_switch]
else:
streams = [s if s else stream_before_context_switch for s in streams]
end_events = [
torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
]
if DEBUG_COMPLETED_TIME:
start = torch.cuda.Event(enable_timing=True)
stream_before_context_switch.record_event(start)
cpu_start = time.monotonic()
logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
grad_enabled_before = torch.is_grad_enabled()
try:
yield
finally:
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_end = time.monotonic()
for i, stream in enumerate(streams):
event = end_events[i]
stream.record_event(event)
grad_enabled_after = torch.is_grad_enabled()
# observed change of torch.is_grad_enabled() during concurrent run of
# async_test_bboxes code
assert (grad_enabled_before == grad_enabled_after
), 'Unexpected is_grad_enabled() value change'
are_done = [e.query() for e in end_events]
logger.debug('%s %s completed: %s streams: %s', trace_name, name,
are_done, streams)
with torch.cuda.stream(stream_before_context_switch):
while not all(are_done):
await asyncio.sleep(sleep_interval)
are_done = [e.query() for e in end_events]
logger.debug(
'%s %s completed: %s streams: %s',
trace_name,
name,
are_done,
streams,
)
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_time = (cpu_end - cpu_start) * 1000
stream_times_ms = ''
for i, stream in enumerate(streams):
elapsed_time = start.elapsed_time(end_events[i])
stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
stream_times_ms)
@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
trace_name='concurrent',
name='stream'):
"""Run code concurrently in different streams.
:param streamqueue: asyncio.Queue instance.
Queue tasks define the pool of streams used for concurrent execution.
"""
if not torch.cuda.is_available():
yield
return
initial_stream = torch.cuda.current_stream()
with torch.cuda.stream(initial_stream):
stream = await streamqueue.get()
assert isinstance(stream, torch.cuda.Stream)
try:
with torch.cuda.stream(stream):
logger.debug('%s %s is starting, stream: %s', trace_name, name,
stream)
yield
current = torch.cuda.current_stream()
assert current == stream
logger.debug('%s %s has finished, stream: %s', trace_name,
name, stream)
finally:
streamqueue.task_done()
streamqueue.put_nowait(stream)
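# --- Hedged usage sketch (added for illustration; not part of the original
# file). It waits for a small matmul to finish on the current CUDA stream;
# on CPU-only machines ``completed`` yields immediately, so the sketch is
# still valid. The trace/name strings and tensor sizes are placeholders.
# async def _demo():
#     async with completed('demo', 'matmul'):
#         x = torch.ones(8, 8)
#         return x @ x
# asyncio.run(_demo())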
| 4,125 | 32.544715 | 79 |
py
|
ERD
|
ERD-main/mmdet/utils/util_mixins.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""This module defines the :class:`NiceRepr` mixin class, which defines a
``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
method, which you must define. This means you only have to overload one
function instead of two. Furthermore, if the object defines a ``__len__``
method, then the ``__nice__`` method defaults to something sensible, otherwise
it is treated as abstract and raises ``NotImplementedError``.
To use simply have your object inherit from :class:`NiceRepr`
(multi-inheritance should be ok).
This code was copied from the ubelt library: https://github.com/Erotemic/ubelt
Example:
>>> # Objects that define __nice__ have a default __str__ and __repr__
>>> class Student(NiceRepr):
... def __init__(self, name):
... self.name = name
... def __nice__(self):
... return self.name
>>> s1 = Student('Alice')
>>> s2 = Student('Bob')
>>> print(f's1 = {s1}')
>>> print(f's2 = {s2}')
s1 = <Student(Alice)>
s2 = <Student(Bob)>
Example:
>>> # Objects that define __len__ have a default __nice__
>>> class Group(NiceRepr):
... def __init__(self, data):
... self.data = data
... def __len__(self):
... return len(self.data)
>>> g = Group([1, 2, 3])
>>> print(f'g = {g}')
g = <Group(3)>
"""
import warnings
class NiceRepr:
"""Inherit from this class and define ``__nice__`` to "nicely" print your
objects.
Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function
Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
    If the inheriting class has a ``__len__`` method, then the default
``__nice__`` method will return its length.
Example:
>>> class Foo(NiceRepr):
... def __nice__(self):
... return 'info'
>>> foo = Foo()
>>> assert str(foo) == '<Foo(info)>'
>>> assert repr(foo).startswith('<Foo(info) at ')
Example:
>>> class Bar(NiceRepr):
... pass
>>> bar = Bar()
>>> import pytest
        >>> with pytest.warns(None) as record:
        ...     assert 'object at' in str(bar)
        ...     assert 'object at' in repr(bar)
Example:
>>> class Baz(NiceRepr):
... def __len__(self):
... return 5
>>> baz = Baz()
>>> assert str(baz) == '<Baz(5)>'
"""
def __nice__(self):
"""str: a "nice" summary string describing this module"""
if hasattr(self, '__len__'):
# It is a common pattern for objects to use __len__ in __nice__
# As a convenience we define a default __nice__ for these objects
return str(len(self))
else:
# In all other cases force the subclass to overload __nice__
raise NotImplementedError(
f'Define the __nice__ method for {self.__class__!r}')
def __repr__(self):
"""str: the string of the module"""
try:
nice = self.__nice__()
classname = self.__class__.__name__
return f'<{classname}({nice}) at {hex(id(self))}>'
except NotImplementedError as ex:
warnings.warn(str(ex), category=RuntimeWarning)
return object.__repr__(self)
def __str__(self):
"""str: the string of the module"""
try:
classname = self.__class__.__name__
nice = self.__nice__()
return f'<{classname}({nice})>'
except NotImplementedError as ex:
warnings.warn(str(ex), category=RuntimeWarning)
return object.__repr__(self)
| 3,712 | 34.028302 | 78 |
py
|
ERD
|
ERD-main/mmdet/utils/benchmark.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import time
from functools import partial
from typing import List, Optional, Union
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import fuse_conv_bn
# TODO need update
# from mmcv.runner import wrap_fp16_model
from mmengine import MMLogger
from mmengine.config import Config
from mmengine.device import get_max_cuda_memory
from mmengine.dist import get_world_size
from mmengine.runner import Runner, load_checkpoint
from mmengine.utils.dl_utils import set_multi_processing
from torch.nn.parallel import DistributedDataParallel
from mmdet.registry import DATASETS, MODELS
try:
import psutil
except ImportError:
psutil = None
def custom_round(value: Union[int, float],
factor: Union[int, float],
precision: int = 2) -> float:
"""Custom round function."""
return round(value / factor, precision)
gb_round = partial(custom_round, factor=1024**3)
def print_log(msg: str, logger: Optional[MMLogger] = None) -> None:
"""Print a log message."""
if logger is None:
print(msg, flush=True)
else:
logger.info(msg)
def print_process_memory(p: psutil.Process,
logger: Optional[MMLogger] = None) -> None:
"""print process memory info."""
mem_used = gb_round(psutil.virtual_memory().used)
memory_full_info = p.memory_full_info()
uss_mem = gb_round(memory_full_info.uss)
pss_mem = gb_round(memory_full_info.pss)
for children in p.children():
child_mem_info = children.memory_full_info()
uss_mem += gb_round(child_mem_info.uss)
pss_mem += gb_round(child_mem_info.pss)
process_count = 1 + len(p.children())
print_log(
f'(GB) mem_used: {mem_used:.2f} | uss: {uss_mem:.2f} | '
f'pss: {pss_mem:.2f} | total_proc: {process_count}', logger)
class BaseBenchmark:
"""The benchmark base class.
The ``run`` method is an external calling interface, and it will
call the ``run_once`` method ``repeat_num`` times for benchmarking.
Finally, call the ``average_multiple_runs`` method to further process
the results of multiple runs.
Args:
max_iter (int): maximum iterations of benchmark.
log_interval (int): interval of logging.
num_warmup (int): Number of Warmup.
logger (MMLogger, optional): Formatted logger used to record messages.
"""
def __init__(self,
max_iter: int,
log_interval: int,
num_warmup: int,
logger: Optional[MMLogger] = None):
self.max_iter = max_iter
self.log_interval = log_interval
self.num_warmup = num_warmup
self.logger = logger
def run(self, repeat_num: int = 1) -> dict:
"""benchmark entry method.
Args:
repeat_num (int): Number of repeat benchmark.
Defaults to 1.
"""
assert repeat_num >= 1
results = []
for _ in range(repeat_num):
results.append(self.run_once())
results = self.average_multiple_runs(results)
return results
def run_once(self) -> dict:
"""Executes the benchmark once."""
raise NotImplementedError()
def average_multiple_runs(self, results: List[dict]) -> dict:
"""Average the results of multiple runs."""
raise NotImplementedError()
class InferenceBenchmark(BaseBenchmark):
"""The inference benchmark class. It will be statistical inference FPS,
CUDA memory and CPU memory information.
Args:
cfg (mmengine.Config): config.
checkpoint (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``.
distributed (bool): distributed testing flag.
is_fuse_conv_bn (bool): Whether to fuse conv and bn, this will
slightly increase the inference speed.
max_iter (int): maximum iterations of benchmark. Defaults to 2000.
log_interval (int): interval of logging. Defaults to 50.
num_warmup (int): Number of Warmup. Defaults to 5.
logger (MMLogger, optional): Formatted logger used to record messages.
"""
def __init__(self,
cfg: Config,
checkpoint: str,
distributed: bool,
is_fuse_conv_bn: bool,
max_iter: int = 2000,
log_interval: int = 50,
num_warmup: int = 5,
logger: Optional[MMLogger] = None):
super().__init__(max_iter, log_interval, num_warmup, logger)
assert get_world_size(
) == 1, 'Inference benchmark does not allow distributed multi-GPU'
self.cfg = copy.deepcopy(cfg)
self.distributed = distributed
if psutil is None:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
self._process = psutil.Process()
env_cfg = self.cfg.get('env_cfg')
if env_cfg.get('cudnn_benchmark'):
torch.backends.cudnn.benchmark = True
mp_cfg: dict = env_cfg.get('mp_cfg', {})
set_multi_processing(**mp_cfg, distributed=self.distributed)
print_log('before build: ', self.logger)
print_process_memory(self._process, self.logger)
self.model = self._init_model(checkpoint, is_fuse_conv_bn)
# Because multiple processes will occupy additional CPU resources,
# FPS statistics will be more unstable when num_workers is not 0.
# It is reasonable to set num_workers to 0.
dataloader_cfg = cfg.test_dataloader
dataloader_cfg['num_workers'] = 0
dataloader_cfg['batch_size'] = 1
dataloader_cfg['persistent_workers'] = False
self.data_loader = Runner.build_dataloader(dataloader_cfg)
print_log('after build: ', self.logger)
print_process_memory(self._process, self.logger)
def _init_model(self, checkpoint: str, is_fuse_conv_bn: bool) -> nn.Module:
"""Initialize the model."""
model = MODELS.build(self.cfg.model)
# TODO need update
# fp16_cfg = self.cfg.get('fp16', None)
# if fp16_cfg is not None:
# wrap_fp16_model(model)
load_checkpoint(model, checkpoint, map_location='cpu')
if is_fuse_conv_bn:
model = fuse_conv_bn(model)
model = model.cuda()
if self.distributed:
model = DistributedDataParallel(
model,
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=False)
model.eval()
return model
def run_once(self) -> dict:
"""Executes the benchmark once."""
pure_inf_time = 0
fps = 0
for i, data in enumerate(self.data_loader):
if (i + 1) % self.log_interval == 0:
print_log('==================================', self.logger)
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
self.model.test_step(data)
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
if i >= self.num_warmup:
pure_inf_time += elapsed
if (i + 1) % self.log_interval == 0:
fps = (i + 1 - self.num_warmup) / pure_inf_time
cuda_memory = get_max_cuda_memory()
print_log(
f'Done image [{i + 1:<3}/{self.max_iter}], '
f'fps: {fps:.1f} img/s, '
f'times per image: {1000 / fps:.1f} ms/img, '
f'cuda memory: {cuda_memory} MB', self.logger)
print_process_memory(self._process, self.logger)
if (i + 1) == self.max_iter:
fps = (i + 1 - self.num_warmup) / pure_inf_time
break
return {'fps': fps}
def average_multiple_runs(self, results: List[dict]) -> dict:
"""Average the results of multiple runs."""
print_log('============== Done ==================', self.logger)
fps_list_ = [round(result['fps'], 1) for result in results]
avg_fps_ = sum(fps_list_) / len(fps_list_)
outputs = {'avg_fps': avg_fps_, 'fps_list': fps_list_}
if len(fps_list_) > 1:
times_pre_image_list_ = [
round(1000 / result['fps'], 1) for result in results
]
avg_times_pre_image_ = sum(times_pre_image_list_) / len(
times_pre_image_list_)
print_log(
f'Overall fps: {fps_list_}[{avg_fps_:.1f}] img/s, '
'times per image: '
f'{times_pre_image_list_}[{avg_times_pre_image_:.1f}] '
'ms/img', self.logger)
else:
print_log(
f'Overall fps: {fps_list_[0]:.1f} img/s, '
f'times per image: {1000 / fps_list_[0]:.1f} ms/img',
self.logger)
print_log(f'cuda memory: {get_max_cuda_memory()} MB', self.logger)
print_process_memory(self._process, self.logger)
return outputs
class DataLoaderBenchmark(BaseBenchmark):
"""The dataloader benchmark class. It will be statistical inference FPS and
CPU memory information.
Args:
cfg (mmengine.Config): config.
distributed (bool): distributed testing flag.
dataset_type (str): benchmark data type, only supports ``train``,
``val`` and ``test``.
max_iter (int): maximum iterations of benchmark. Defaults to 2000.
log_interval (int): interval of logging. Defaults to 50.
num_warmup (int): Number of Warmup. Defaults to 5.
logger (MMLogger, optional): Formatted logger used to record messages.
"""
def __init__(self,
cfg: Config,
distributed: bool,
dataset_type: str,
max_iter: int = 2000,
log_interval: int = 50,
num_warmup: int = 5,
logger: Optional[MMLogger] = None):
super().__init__(max_iter, log_interval, num_warmup, logger)
assert dataset_type in ['train', 'val', 'test'], \
'dataset_type only supports train,' \
f' val and test, but got {dataset_type}'
assert get_world_size(
) == 1, 'Dataloader benchmark does not allow distributed multi-GPU'
self.cfg = copy.deepcopy(cfg)
self.distributed = distributed
if psutil is None:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
self._process = psutil.Process()
mp_cfg = self.cfg.get('env_cfg', {}).get('mp_cfg')
if mp_cfg is not None:
set_multi_processing(distributed=self.distributed, **mp_cfg)
else:
set_multi_processing(distributed=self.distributed)
print_log('before build: ', self.logger)
print_process_memory(self._process, self.logger)
if dataset_type == 'train':
self.data_loader = Runner.build_dataloader(cfg.train_dataloader)
elif dataset_type == 'test':
self.data_loader = Runner.build_dataloader(cfg.test_dataloader)
else:
self.data_loader = Runner.build_dataloader(cfg.val_dataloader)
self.batch_size = self.data_loader.batch_size
self.num_workers = self.data_loader.num_workers
print_log('after build: ', self.logger)
print_process_memory(self._process, self.logger)
def run_once(self) -> dict:
"""Executes the benchmark once."""
pure_inf_time = 0
fps = 0
        # benchmark with 2000 images and take the average
start_time = time.perf_counter()
for i, data in enumerate(self.data_loader):
elapsed = time.perf_counter() - start_time
if (i + 1) % self.log_interval == 0:
print_log('==================================', self.logger)
if i >= self.num_warmup:
pure_inf_time += elapsed
if (i + 1) % self.log_interval == 0:
fps = (i + 1 - self.num_warmup) / pure_inf_time
print_log(
f'Done batch [{i + 1:<3}/{self.max_iter}], '
f'fps: {fps:.1f} batch/s, '
f'times per batch: {1000 / fps:.1f} ms/batch, '
f'batch size: {self.batch_size}, num_workers: '
f'{self.num_workers}', self.logger)
print_process_memory(self._process, self.logger)
if (i + 1) == self.max_iter:
fps = (i + 1 - self.num_warmup) / pure_inf_time
break
start_time = time.perf_counter()
return {'fps': fps}
def average_multiple_runs(self, results: List[dict]) -> dict:
"""Average the results of multiple runs."""
print_log('============== Done ==================', self.logger)
fps_list_ = [round(result['fps'], 1) for result in results]
avg_fps_ = sum(fps_list_) / len(fps_list_)
outputs = {'avg_fps': avg_fps_, 'fps_list': fps_list_}
if len(fps_list_) > 1:
times_pre_image_list_ = [
round(1000 / result['fps'], 1) for result in results
]
avg_times_pre_image_ = sum(times_pre_image_list_) / len(
times_pre_image_list_)
print_log(
f'Overall fps: {fps_list_}[{avg_fps_:.1f}] img/s, '
'times per batch: '
f'{times_pre_image_list_}[{avg_times_pre_image_:.1f}] '
f'ms/batch, batch size: {self.batch_size}, num_workers: '
f'{self.num_workers}', self.logger)
else:
print_log(
f'Overall fps: {fps_list_[0]:.1f} batch/s, '
f'times per batch: {1000 / fps_list_[0]:.1f} ms/batch, '
f'batch size: {self.batch_size}, num_workers: '
f'{self.num_workers}', self.logger)
print_process_memory(self._process, self.logger)
return outputs
class DatasetBenchmark(BaseBenchmark):
"""The dataset benchmark class. It will be statistical inference FPS, FPS
pre transform and CPU memory information.
Args:
cfg (mmengine.Config): config.
dataset_type (str): benchmark data type, only supports ``train``,
``val`` and ``test``.
max_iter (int): maximum iterations of benchmark. Defaults to 2000.
log_interval (int): interval of logging. Defaults to 50.
num_warmup (int): Number of Warmup. Defaults to 5.
logger (MMLogger, optional): Formatted logger used to record messages.
"""
def __init__(self,
cfg: Config,
dataset_type: str,
max_iter: int = 2000,
log_interval: int = 50,
num_warmup: int = 5,
logger: Optional[MMLogger] = None):
super().__init__(max_iter, log_interval, num_warmup, logger)
assert dataset_type in ['train', 'val', 'test'], \
'dataset_type only supports train,' \
f' val and test, but got {dataset_type}'
assert get_world_size(
) == 1, 'Dataset benchmark does not allow distributed multi-GPU'
self.cfg = copy.deepcopy(cfg)
if dataset_type == 'train':
dataloader_cfg = copy.deepcopy(cfg.train_dataloader)
elif dataset_type == 'test':
dataloader_cfg = copy.deepcopy(cfg.test_dataloader)
else:
dataloader_cfg = copy.deepcopy(cfg.val_dataloader)
dataset_cfg = dataloader_cfg.pop('dataset')
dataset = DATASETS.build(dataset_cfg)
if hasattr(dataset, 'full_init'):
dataset.full_init()
self.dataset = dataset
def run_once(self) -> dict:
"""Executes the benchmark once."""
pure_inf_time = 0
fps = 0
total_index = list(range(len(self.dataset)))
np.random.shuffle(total_index)
start_time = time.perf_counter()
for i, idx in enumerate(total_index):
if (i + 1) % self.log_interval == 0:
print_log('==================================', self.logger)
get_data_info_start_time = time.perf_counter()
data_info = self.dataset.get_data_info(idx)
get_data_info_elapsed = time.perf_counter(
) - get_data_info_start_time
if (i + 1) % self.log_interval == 0:
print_log(f'get_data_info - {get_data_info_elapsed * 1000} ms',
self.logger)
for t in self.dataset.pipeline.transforms:
transform_start_time = time.perf_counter()
data_info = t(data_info)
transform_elapsed = time.perf_counter() - transform_start_time
if (i + 1) % self.log_interval == 0:
print_log(
f'{t.__class__.__name__} - '
f'{transform_elapsed * 1000} ms', self.logger)
if data_info is None:
break
elapsed = time.perf_counter() - start_time
if i >= self.num_warmup:
pure_inf_time += elapsed
if (i + 1) % self.log_interval == 0:
fps = (i + 1 - self.num_warmup) / pure_inf_time
print_log(
f'Done img [{i + 1:<3}/{self.max_iter}], '
f'fps: {fps:.1f} img/s, '
f'times per img: {1000 / fps:.1f} ms/img', self.logger)
if (i + 1) == self.max_iter:
fps = (i + 1 - self.num_warmup) / pure_inf_time
break
start_time = time.perf_counter()
return {'fps': fps}
def average_multiple_runs(self, results: List[dict]) -> dict:
"""Average the results of multiple runs."""
print_log('============== Done ==================', self.logger)
fps_list_ = [round(result['fps'], 1) for result in results]
avg_fps_ = sum(fps_list_) / len(fps_list_)
outputs = {'avg_fps': avg_fps_, 'fps_list': fps_list_}
if len(fps_list_) > 1:
times_pre_image_list_ = [
round(1000 / result['fps'], 1) for result in results
]
avg_times_pre_image_ = sum(times_pre_image_list_) / len(
times_pre_image_list_)
print_log(
f'Overall fps: {fps_list_}[{avg_fps_:.1f}] img/s, '
'times per img: '
f'{times_pre_image_list_}[{avg_times_pre_image_:.1f}] '
'ms/img', self.logger)
else:
print_log(
f'Overall fps: {fps_list_[0]:.1f} img/s, '
f'times per img: {1000 / fps_list_[0]:.1f} ms/img',
self.logger)
return outputs
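# --- Hedged usage sketch (added for illustration; not part of the original
# file). The config and checkpoint paths below are hypothetical placeholders
# and a CUDA device is required by ``InferenceBenchmark``.
# from mmengine.config import Config
# cfg = Config.fromfile('configs/retinanet/retinanet_r50_fpn_1x_coco.py')
# benchmark = InferenceBenchmark(
#     cfg, 'work_dirs/retinanet/epoch_12.pth',
#     distributed=False, is_fuse_conv_bn=False, max_iter=200)
# print(benchmark.run(repeat_num=1)['avg_fps'])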
| 19,185 | 35.684512 | 79 |
py
|
ERD
|
ERD-main/mmdet/utils/memory.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from collections import abc
from contextlib import contextmanager
from functools import wraps
import torch
from mmengine.logging import MMLogger
def cast_tensor_type(inputs, src_type=None, dst_type=None):
"""Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``.
Args:
        inputs: Inputs to be cast.
        src_type (torch.dtype | torch.device): Source type.
        dst_type (torch.dtype | torch.device): Destination type.
    Returns:
        The same type as inputs, but all contained Tensors have been cast.
"""
assert dst_type is not None
if isinstance(inputs, torch.Tensor):
if isinstance(dst_type, torch.device):
# convert Tensor to dst_device
if hasattr(inputs, 'to') and \
hasattr(inputs, 'device') and \
(inputs.device == src_type or src_type is None):
return inputs.to(dst_type)
else:
return inputs
else:
# convert Tensor to dst_dtype
if hasattr(inputs, 'to') and \
hasattr(inputs, 'dtype') and \
(inputs.dtype == src_type or src_type is None):
return inputs.to(dst_type)
else:
return inputs
# we need to ensure that the type of inputs to be casted are the same
# as the argument `src_type`.
elif isinstance(inputs, abc.Mapping):
return type(inputs)({
k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type)
for k, v in inputs.items()
})
elif isinstance(inputs, abc.Iterable):
return type(inputs)(
cast_tensor_type(item, src_type=src_type, dst_type=dst_type)
for item in inputs)
# TODO: Currently not supported
# elif isinstance(inputs, InstanceData):
# for key, value in inputs.items():
# inputs[key] = cast_tensor_type(
# value, src_type=src_type, dst_type=dst_type)
# return inputs
else:
return inputs
@contextmanager
def _ignore_torch_cuda_oom():
"""A context which ignores CUDA OOM exception from pytorch.
Code is modified from
<https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py> # noqa: E501
"""
try:
yield
except RuntimeError as e:
# NOTE: the string may change?
if 'CUDA out of memory. ' in str(e):
pass
else:
raise
class AvoidOOM:
"""Try to convert inputs to FP16 and CPU if got a PyTorch's CUDA Out of
Memory error. It will do the following steps:
1. First retry after calling `torch.cuda.empty_cache()`.
2. If that still fails, it will then retry by converting inputs
to FP16.
3. If that still fails trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to
CPU implementation.
Args:
to_cpu (bool): Whether to convert outputs to CPU if get an OOM
error. This will slow down the code significantly.
Defaults to True.
        test (bool): Skip the `_ignore_torch_cuda_oom` step so that
            lightweight data can be used in unit tests; only used for
            unit testing. Defaults to False.
Examples:
>>> from mmdet.utils.memory import AvoidOOM
>>> AvoidCUDAOOM = AvoidOOM()
        >>> output = AvoidCUDAOOM.retry_if_cuda_oom(
>>> some_torch_function)(input1, input2)
>>> # To use as a decorator
>>> # from mmdet.utils import AvoidCUDAOOM
>>> @AvoidCUDAOOM.retry_if_cuda_oom
>>> def function(*args, **kwargs):
>>> return None
Note:
1. The output may be on CPU even if inputs are on GPU. Processing
on CPU will slow down the code significantly.
2. When converting inputs to CPU, it will only look at each argument
and check if it has `.device` and `.to` for conversion. Nested
structures of tensors are not supported.
3. Since the function might be called more than once, it has to be
stateless.
"""
def __init__(self, to_cpu=True, test=False):
self.to_cpu = to_cpu
self.test = test
def retry_if_cuda_oom(self, func):
"""Makes a function retry itself after encountering pytorch's CUDA OOM
error.
The implementation logic is referred to
https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py
Args:
func: a stateless callable that takes tensor-like objects
as arguments.
Returns:
func: a callable which retries `func` if OOM is encountered.
""" # noqa: W605
@wraps(func)
def wrapped(*args, **kwargs):
# raw function
if not self.test:
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Clear cache and retry
torch.cuda.empty_cache()
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# get the type and device of first tensor
dtype, device = None, None
values = args + tuple(kwargs.values())
for value in values:
if isinstance(value, torch.Tensor):
dtype = value.dtype
device = value.device
break
if dtype is None or device is None:
raise ValueError('There is no tensor in the inputs, '
'cannot get dtype and device.')
# Convert to FP16
fp16_args = cast_tensor_type(args, dst_type=torch.half)
fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half)
logger = MMLogger.get_current_instance()
logger.warning(f'Attempting to copy inputs of {str(func)} '
'to FP16 due to CUDA OOM')
            # get input tensor type, the output type will be the same as
# the first parameter type.
with _ignore_torch_cuda_oom():
output = func(*fp16_args, **fp16_kwargs)
output = cast_tensor_type(
output, src_type=torch.half, dst_type=dtype)
if not self.test:
return output
logger.warning('Using FP16 still meet CUDA OOM')
# Try on CPU. This will slow down the code significantly,
# therefore print a notice.
if self.to_cpu:
logger.warning(f'Attempting to copy inputs of {str(func)} '
'to CPU due to CUDA OOM')
cpu_device = torch.empty(0).device
cpu_args = cast_tensor_type(args, dst_type=cpu_device)
cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device)
# convert outputs to GPU
with _ignore_torch_cuda_oom():
logger.warning(f'Convert outputs to GPU (device={device})')
output = func(*cpu_args, **cpu_kwargs)
output = cast_tensor_type(
output, src_type=cpu_device, dst_type=device)
return output
warnings.warn('Cannot convert output to GPU due to CUDA OOM, '
'the output is now on CPU, which might cause '
'errors if the output need to interact with GPU '
'data in subsequent operations')
logger.warning('Cannot convert output to GPU due to '
'CUDA OOM, the output is on CPU now.')
return func(*cpu_args, **cpu_kwargs)
else:
# may still get CUDA OOM error
return func(*args, **kwargs)
return wrapped
# To use AvoidOOM as a decorator
AvoidCUDAOOM = AvoidOOM()
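# --- Hedged usage sketch (added for illustration; not part of the original
# file). ``cast_tensor_type`` walks nested containers, so a dict holding
# tensors can be converted to FP16 in one call; the example data below is
# an assumed placeholder and runs on CPU.
# example = {'feat': torch.randn(2, 3), 'scores': [torch.zeros(4)]}
# fp16_example = cast_tensor_type(example, dst_type=torch.half)
# assert fp16_example['feat'].dtype == torch.half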
| 8,099 | 37.028169 | 103 |
py
|
ERD
|
ERD-main/mmdet/utils/typing_utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import List, Optional, Sequence, Tuple, Union
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData, PixelData
# TODO: Need to avoid circular import with assigner and sampler
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
PixelList = List[PixelData]
OptPixelList = Optional[PixelList]
RangeType = Sequence[Tuple[int, int]]
| 737 | 31.086957 | 63 |
py
|
ERD
|
ERD-main/mmdet/utils/replace_cfg_vals.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import re
from mmengine.config import Config
def replace_cfg_vals(ori_cfg):
"""Replace the string "${key}" with the corresponding value.
Replace the "${key}" with the value of ori_cfg.key in the config. And
support replacing the chained ${key}. Such as, replace "${key0.key1}"
with the value of cfg.key0.key1. Code is modified from `vars.py
    <https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501
Args:
ori_cfg (mmengine.config.Config):
The origin config with "${key}" generated from a file.
Returns:
updated_cfg [mmengine.config.Config]:
The config with "${key}" replaced by the corresponding value.
"""
def get_value(cfg, key):
for k in key.split('.'):
cfg = cfg[k]
return cfg
def replace_value(cfg):
if isinstance(cfg, dict):
return {key: replace_value(value) for key, value in cfg.items()}
elif isinstance(cfg, list):
return [replace_value(item) for item in cfg]
elif isinstance(cfg, tuple):
return tuple([replace_value(item) for item in cfg])
elif isinstance(cfg, str):
# the format of string cfg may be:
# 1) "${key}", which will be replaced with cfg.key directly
# 2) "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx",
# which will be replaced with the string of the cfg.key
keys = pattern_key.findall(cfg)
values = [get_value(ori_cfg, key[2:-1]) for key in keys]
if len(keys) == 1 and keys[0] == cfg:
# the format of string cfg is "${key}"
cfg = values[0]
else:
for key, value in zip(keys, values):
# the format of string cfg is
# "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx"
assert not isinstance(value, (dict, list, tuple)), \
f'for the format of string cfg is ' \
f"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', " \
f"the type of the value of '${key}' " \
                        f'can not be dict, list, or tuple, ' \
f'but you input {type(value)} in {cfg}'
cfg = cfg.replace(key, str(value))
return cfg
else:
return cfg
# the pattern of string "${key}"
pattern_key = re.compile(r'\$\{[a-zA-Z\d_.]*\}')
# the type of ori_cfg._cfg_dict is mmengine.config.ConfigDict
updated_cfg = Config(
replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename)
# replace the model with model_wrapper
if updated_cfg.get('model_wrapper', None) is not None:
updated_cfg.model = updated_cfg.model_wrapper
updated_cfg.pop('model_wrapper')
return updated_cfg
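# --- Hedged usage sketch (added for illustration; not part of the original
# file). "${key}" references are resolved against the same config; the keys
# and paths below are assumed placeholder values.
# from mmengine.config import Config
# cfg = Config(dict(data_root='data/coco/',
#                   train_ann='${data_root}annotations/train.json'))
# cfg = replace_cfg_vals(cfg)
# assert cfg.train_ann == 'data/coco/annotations/train.json'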
| 2,914 | 40.056338 | 92 |
py
|
ERD
|
ERD-main/mmdet/utils/profiling.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import contextlib
import sys
import time
import torch
if sys.version_info >= (3, 7):
@contextlib.contextmanager
def profile_time(trace_name,
name,
enabled=True,
stream=None,
end_stream=None):
"""Print time spent by CPU and GPU.
Useful as a temporary context manager to find sweet spots of code
suitable for async implementation.
"""
if (not enabled) or not torch.cuda.is_available():
yield
return
stream = stream if stream else torch.cuda.current_stream()
end_stream = end_stream if end_stream else stream
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
stream.record_event(start)
try:
cpu_start = time.monotonic()
yield
finally:
cpu_end = time.monotonic()
end_stream.record_event(end)
end.synchronize()
cpu_time = (cpu_end - cpu_start) * 1000
gpu_time = start.elapsed_time(end)
msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'
print(msg, end_stream)
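# Minimal usage sketch (illustrative only, not part of the original module):
# time a small matrix multiplication. On CPU-only machines profile_time is a
# no-op, so the example is safe to run anywhere.
if __name__ == '__main__':
    if sys.version_info >= (3, 7):
        with profile_time('demo_trace', 'matmul'):
            if torch.cuda.is_available():
                x = torch.randn(1024, 1024, device='cuda')
                torch.matmul(x, x).sum().item()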
| 1,336 | 31.609756 | 73 |
py
|
ERD
|
ERD-main/mmdet/utils/dist_utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import pickle
import warnings
from collections import OrderedDict
import numpy as np
import torch
import torch.distributed as dist
from mmengine.dist import get_dist_info
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
"""Allreduce gradients.
Args:
        params (list[torch.nn.Parameter]): List of parameters of a model.
coalesce (bool, optional): Whether allreduce parameters as a whole.
Defaults to True.
bucket_size_mb (int, optional): Size of bucket, the unit is MB.
Defaults to -1.
"""
grads = [
param.grad.data for param in params
if param.requires_grad and param.grad is not None
]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
def reduce_mean(tensor):
""""Obtain the mean of tensor on different GPUs."""
if not (dist.is_available() and dist.is_initialized()):
return tensor
tensor = tensor.clone()
dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
return tensor
def obj2tensor(pyobj, device='cuda'):
"""Serialize picklable python object to tensor."""
storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))
return torch.ByteTensor(storage).to(device=device)
def tensor2obj(tensor):
"""Deserialize tensor to picklable python object."""
return pickle.loads(tensor.cpu().numpy().tobytes())
@functools.lru_cache()
def _get_global_gloo_group():
"""Return a process group based on gloo backend, containing all the ranks
The result is cached."""
if dist.get_backend() == 'nccl':
return dist.new_group(backend='gloo')
else:
return dist.group.WORLD
def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
"""Apply all reduce function for python dict object.
The code is modified from https://github.com/Megvii-
BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py.
NOTE: make sure that py_dict in different ranks has the same keys and
the values should be in the same shape. Currently only supports
nccl backend.
Args:
py_dict (dict): Dict to be applied all reduce op.
op (str): Operator, could be 'sum' or 'mean'. Default: 'sum'
group (:obj:`torch.distributed.group`, optional): Distributed group,
Default: None.
to_float (bool): Whether to convert all values of dict to float.
Default: True.
Returns:
OrderedDict: reduced python dict object.
"""
warnings.warn(
        '`group` is deprecated. Currently only supports NCCL backend.')
_, world_size = get_dist_info()
if world_size == 1:
return py_dict
# all reduce logic across different devices.
py_key = list(py_dict.keys())
if not isinstance(py_dict, OrderedDict):
py_key_tensor = obj2tensor(py_key)
dist.broadcast(py_key_tensor, src=0)
py_key = tensor2obj(py_key_tensor)
tensor_shapes = [py_dict[k].shape for k in py_key]
tensor_numels = [py_dict[k].numel() for k in py_key]
if to_float:
warnings.warn('Note: the "to_float" is True, you need to '
'ensure that the behavior is reasonable.')
flatten_tensor = torch.cat(
[py_dict[k].flatten().float() for k in py_key])
else:
flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])
dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM)
if op == 'mean':
flatten_tensor /= world_size
split_tensors = [
x.reshape(shape) for x, shape in zip(
torch.split(flatten_tensor, tensor_numels), tensor_shapes)
]
out_dict = {k: v for k, v in zip(py_key, split_tensors)}
if isinstance(py_dict, OrderedDict):
out_dict = OrderedDict(out_dict)
return out_dict
def sync_random_seed(seed=None, device='cuda'):
"""Make sure different ranks share the same seed.
All workers must call this function, otherwise it will deadlock.
This method is generally used in `DistributedSampler`,
because the seed should be identical across all processes
in the distributed group.
In distributed sampling, different ranks should sample non-overlapped
data in the dataset. Therefore, this function is used to make sure that
each rank shuffles the data indices in the same order based
on the same seed. Then different ranks could use different indices
to select non-overlapped data from the same data list.
Args:
seed (int, Optional): The seed. Default to None.
device (str): The device where the seed will be put on.
Default to 'cuda'.
Returns:
int: Seed to be used.
"""
if seed is None:
seed = np.random.randint(2**31)
assert isinstance(seed, int)
rank, world_size = get_dist_info()
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()
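# Minimal usage sketch (illustrative only, not part of the original module).
# Without an initialized process group, reduce_mean and sync_random_seed fall
# back to purely local behaviour, so this runs without launching a
# distributed job.
if __name__ == '__main__':
    t = torch.tensor([1.0, 2.0, 3.0])
    print(reduce_mean(t)) # tensor unchanged when not distributed
    print(sync_random_seed(seed=42)) # 42 when world_size == 1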
| 6,194 | 32.486486 | 77 |
py
|
ERD
|
ERD-main/mmdet/utils/misc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import warnings
from typing import Union
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
        path (str): The path to find checkpoints.
        suffix (str): File extension.
            Defaults to pth.
    Returns:
        latest_path (str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
    If the environment variable MMDET_DATASETS is set, update cfg.data_root
    according to it. Otherwise, keep cfg.data_root as the default.
    Args:
        cfg (:obj:`Config`): The model config to be modified.
        logger (logging.Logger | str | None): The way to print the message.
"""
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
f'Using {dst_root} as data root.')
else:
return
assert isinstance(cfg, Config), \
f'cfg got wrong type: {type(cfg)}, expected mmengine.Config'
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
def get_test_pipeline_cfg(cfg: Union[str, ConfigDict]) -> ConfigDict:
"""Get the test dataset pipeline from entire config.
Args:
cfg (str or :obj:`ConfigDict`): the entire config. Can be a config
file or a ``ConfigDict``.
Returns:
:obj:`ConfigDict`: the config of test dataset.
"""
if isinstance(cfg, str):
cfg = Config.fromfile(cfg)
def _get_test_pipeline_cfg(dataset_cfg):
if 'pipeline' in dataset_cfg:
return dataset_cfg.pipeline
# handle dataset wrapper
elif 'dataset' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.dataset)
# handle dataset wrappers like ConcatDataset
elif 'datasets' in dataset_cfg:
return _get_test_pipeline_cfg(dataset_cfg.datasets[0])
raise RuntimeError('Cannot find `pipeline` in `test_dataloader`')
return _get_test_pipeline_cfg(cfg.test_dataloader.dataset)
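# Minimal usage sketch (illustrative only, not part of the original module):
# create a temporary directory with fake checkpoint files and locate the one
# with the highest epoch number.
if __name__ == '__main__':
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        for epoch in (1, 2, 12):
            open(osp.join(tmp_dir, f'epoch_{epoch}.pth'), 'w').close()
        # expected to end with 'epoch_12.pth'
        print(find_latest_checkpoint(tmp_dir))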
| 3,445 | 31.509434 | 74 |
py
|
ERD
|
ERD-main/mmdet/utils/setup_env.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import logging
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
from mmengine import DefaultScope
from mmengine.logging import print_log
from mmengine.utils import digit_version
def setup_cache_size_limit_of_dynamo():
"""Setup cache size limit of dynamo.
Note: Due to the dynamic shape of the loss calculation and
post-processing parts in the object detection algorithm, these
functions must be compiled every time they are run.
Setting a large value for torch._dynamo.config.cache_size_limit
may result in repeated compilation, which can slow down training
and testing speed. Therefore, we need to set the default value of
cache_size_limit smaller. An empirical value is 4.
"""
import torch
if digit_version(torch.__version__) >= digit_version('2.0.0'):
if 'DYNAMO_CACHE_SIZE_LIMIT' in os.environ:
import torch._dynamo
cache_size_limit = int(os.environ['DYNAMO_CACHE_SIZE_LIMIT'])
torch._dynamo.config.cache_size_limit = cache_size_limit
print_log(
f'torch._dynamo.config.cache_size_limit is force '
f'set to {cache_size_limit}.',
logger='current',
level=logging.WARNING)
def setup_multi_processes(cfg):
"""Setup multi-processing environment variables."""
# set multi-process start method as `fork` to speed up the training
if platform.system() != 'Windows':
mp_start_method = cfg.get('mp_start_method', 'fork')
current_method = mp.get_start_method(allow_none=True)
if current_method is not None and current_method != mp_start_method:
warnings.warn(
f'Multi-processing start method `{mp_start_method}` is '
f'different from the previous setting `{current_method}`.'
f'It will be force set to `{mp_start_method}`. You can change '
f'this behavior by changing `mp_start_method` in your config.')
mp.set_start_method(mp_start_method, force=True)
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = cfg.get('opencv_num_threads', 0)
cv2.setNumThreads(opencv_num_threads)
# setup OMP threads
# This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa
workers_per_gpu = cfg.data.get('workers_per_gpu', 1)
if 'train_dataloader' in cfg.data:
workers_per_gpu = \
max(cfg.data.train_dataloader.get('workers_per_gpu', 1),
workers_per_gpu)
if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
omp_num_threads = 1
warnings.warn(
f'Setting OMP_NUM_THREADS environment variable for each process '
f'to be {omp_num_threads} in default, to avoid your system being '
f'overloaded, please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
# setup MKL threads
if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
mkl_num_threads = 1
warnings.warn(
f'Setting MKL_NUM_THREADS environment variable for each process '
f'to be {mkl_num_threads} in default, to avoid your system being '
f'overloaded, please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
def register_all_modules(init_default_scope: bool = True) -> None:
"""Register all modules in mmdet into the registries.
Args:
init_default_scope (bool): Whether initialize the mmdet default scope.
When `init_default_scope=True`, the global default scope will be
set to `mmdet`, and all registries will build modules from mmdet's
registry node. To understand more about the registry, please refer
to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
Defaults to True.
""" # noqa
import mmdet.datasets # noqa: F401,F403
import mmdet.engine # noqa: F401,F403
import mmdet.evaluation # noqa: F401,F403
import mmdet.models # noqa: F401,F403
import mmdet.visualization # noqa: F401,F403
if init_default_scope:
never_created = DefaultScope.get_current_instance() is None \
or not DefaultScope.check_instance_created('mmdet')
if never_created:
DefaultScope.get_instance('mmdet', scope_name='mmdet')
return
current_scope = DefaultScope.get_current_instance()
if current_scope.scope_name != 'mmdet':
warnings.warn('The current default scope '
f'"{current_scope.scope_name}" is not "mmdet", '
'`register_all_modules` will force the current'
'default scope to be "mmdet". If this is not '
'expected, please set `init_default_scope=False`.')
# avoid name conflict
new_instance_name = f'mmdet-{datetime.datetime.now()}'
DefaultScope.get_instance(new_instance_name, scope_name='mmdet')
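# Minimal usage sketch (illustrative only, not part of the original module):
# apply the dynamo cache-size limit (only if DYNAMO_CACHE_SIZE_LIMIT is set)
# and register all MMDetection modules with 'mmdet' as the default scope.
if __name__ == '__main__':
    setup_cache_size_limit_of_dynamo()
    register_all_modules(init_default_scope=True)
    print(DefaultScope.get_current_instance().scope_name) # 'mmdet'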
| 5,383 | 44.243697 | 112 |
py
|
ERD
|
ERD-main/mmdet/utils/util_random.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Helpers for random number generators."""
import numpy as np
def ensure_rng(rng=None):
"""Coerces input into a random number generator.
If the input is None, then a global random state is returned.
If the input is a numeric value, then that is used as a seed to construct a
random state. Otherwise the input is returned as-is.
Adapted from [1]_.
Args:
rng (int | numpy.random.RandomState | None):
if None, then defaults to the global rng. Otherwise this can be an
integer or a RandomState class
Returns:
(numpy.random.RandomState) : rng -
a numpy random number generator
References:
.. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501
"""
if rng is None:
rng = np.random.mtrand._rand
elif isinstance(rng, int):
rng = np.random.RandomState(rng)
else:
rng = rng
return rng
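# Minimal usage sketch (illustrative only, not part of the original module):
# the same integer seed always yields the same RandomState, and an existing
# RandomState instance is passed through unchanged.
if __name__ == '__main__':
    rng_a = ensure_rng(0)
    rng_b = ensure_rng(0)
    assert rng_a.randint(100) == rng_b.randint(100)
    rng_c = np.random.RandomState(7)
    assert ensure_rng(rng_c) is rng_c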
| 1,025 | 28.314286 | 119 |
py
|
ERD
|
ERD-main/mmdet/utils/logger.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import inspect
from mmengine.logging import print_log
def get_caller_name():
"""Get name of caller method."""
# this_func_frame = inspect.stack()[0][0] # i.e., get_caller_name
# callee_frame = inspect.stack()[1][0] # e.g., log_img_scale
caller_frame = inspect.stack()[2][0] # e.g., caller of log_img_scale
caller_method = caller_frame.f_code.co_name
try:
caller_class = caller_frame.f_locals['self'].__class__.__name__
return f'{caller_class}.{caller_method}'
except KeyError: # caller is a function
return caller_method
def log_img_scale(img_scale, shape_order='hw', skip_square=False):
"""Log image size.
Args:
img_scale (tuple): Image size to be logged.
shape_order (str, optional): The order of image shape.
'hw' for (height, width) and 'wh' for (width, height).
Defaults to 'hw'.
skip_square (bool, optional): Whether to skip logging for square
img_scale. Defaults to False.
Returns:
        bool: Whether logging was performed.
"""
if shape_order == 'hw':
height, width = img_scale
elif shape_order == 'wh':
width, height = img_scale
else:
raise ValueError(f'Invalid shape_order {shape_order}.')
if skip_square and (height == width):
return False
caller = get_caller_name()
print_log(
f'image shape: height={height}, width={width} in {caller}',
logger='current')
return True
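# Minimal usage sketch (illustrative only, not part of the original module):
# calling log_img_scale from a plain function logs the shape together with the
# caller name; square scales can be skipped with skip_square=True.
if __name__ == '__main__':
    def demo_caller():
        return log_img_scale((640, 480), shape_order='hw')
    assert demo_caller() is True
    assert log_img_scale((320, 320), skip_square=True) is False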
| 1,546 | 29.94 | 73 |
py
|
ERD
|
ERD-main/mmdet/utils/collect_env.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import get_git_hash
from mmengine.utils.dl_utils import collect_env as collect_base_env
import mmdet
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
| 488 | 26.166667 | 74 |
py
|
ERD
|
ERD-main/mmdet/utils/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import (register_all_modules, setup_cache_size_limit_of_dynamo,
setup_multi_processes)
from .split_batch import split_batch
from .typing_utils import (ConfigType, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, PixelList, RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg',
'setup_cache_size_limit_of_dynamo'
]
| 1,394 | 48.821429 | 79 |
py
|
ERD
|
ERD-main/mmdet/utils/compat_config.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from mmengine.config import ConfigDict
def compat_cfg(cfg):
"""This function would modify some filed to keep the compatibility of
config.
For example, it will move some args which will be deprecated to the correct
fields.
"""
cfg = copy.deepcopy(cfg)
cfg = compat_imgs_per_gpu(cfg)
cfg = compat_loader_args(cfg)
cfg = compat_runner_args(cfg)
return cfg
def compat_runner_args(cfg):
if 'runner' not in cfg:
cfg.runner = ConfigDict({
'type': 'EpochBasedRunner',
'max_epochs': cfg.total_epochs
})
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
else:
if 'total_epochs' in cfg:
assert cfg.total_epochs == cfg.runner.max_epochs
return cfg
def compat_imgs_per_gpu(cfg):
cfg = copy.deepcopy(cfg)
if 'imgs_per_gpu' in cfg.data:
warnings.warn('"imgs_per_gpu" is deprecated in MMDet V2.0. '
'Please use "samples_per_gpu" instead')
if 'samples_per_gpu' in cfg.data:
warnings.warn(
f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
                f'={cfg.data.imgs_per_gpu} is used in this experiment')
else:
warnings.warn('Automatically set "samples_per_gpu"="imgs_per_gpu"='
                          f'{cfg.data.imgs_per_gpu} in this experiment')
cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
return cfg
def compat_loader_args(cfg):
"""Deprecated sample_per_gpu in cfg.data."""
cfg = copy.deepcopy(cfg)
if 'train_dataloader' not in cfg.data:
cfg.data['train_dataloader'] = ConfigDict()
if 'val_dataloader' not in cfg.data:
cfg.data['val_dataloader'] = ConfigDict()
if 'test_dataloader' not in cfg.data:
cfg.data['test_dataloader'] = ConfigDict()
# special process for train_dataloader
if 'samples_per_gpu' in cfg.data:
samples_per_gpu = cfg.data.pop('samples_per_gpu')
assert 'samples_per_gpu' not in \
cfg.data.train_dataloader, ('`samples_per_gpu` are set '
'in `data` field and ` '
'data.train_dataloader` '
'at the same time. '
'Please only set it in '
'`data.train_dataloader`. ')
cfg.data.train_dataloader['samples_per_gpu'] = samples_per_gpu
if 'persistent_workers' in cfg.data:
persistent_workers = cfg.data.pop('persistent_workers')
assert 'persistent_workers' not in \
cfg.data.train_dataloader, ('`persistent_workers` are set '
'in `data` field and ` '
'data.train_dataloader` '
'at the same time. '
'Please only set it in '
'`data.train_dataloader`. ')
cfg.data.train_dataloader['persistent_workers'] = persistent_workers
if 'workers_per_gpu' in cfg.data:
workers_per_gpu = cfg.data.pop('workers_per_gpu')
cfg.data.train_dataloader['workers_per_gpu'] = workers_per_gpu
cfg.data.val_dataloader['workers_per_gpu'] = workers_per_gpu
cfg.data.test_dataloader['workers_per_gpu'] = workers_per_gpu
# special process for val_dataloader
if 'samples_per_gpu' in cfg.data.val:
# keep default value of `sample_per_gpu` is 1
assert 'samples_per_gpu' not in \
cfg.data.val_dataloader, ('`samples_per_gpu` are set '
'in `data.val` field and ` '
'data.val_dataloader` at '
'the same time. '
'Please only set it in '
'`data.val_dataloader`. ')
cfg.data.val_dataloader['samples_per_gpu'] = \
cfg.data.val.pop('samples_per_gpu')
# special process for val_dataloader
# in case the test dataset is concatenated
if isinstance(cfg.data.test, dict):
if 'samples_per_gpu' in cfg.data.test:
assert 'samples_per_gpu' not in \
cfg.data.test_dataloader, ('`samples_per_gpu` are set '
'in `data.test` field and ` '
'data.test_dataloader` '
'at the same time. '
'Please only set it in '
'`data.test_dataloader`. ')
cfg.data.test_dataloader['samples_per_gpu'] = \
cfg.data.test.pop('samples_per_gpu')
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
if 'samples_per_gpu' in ds_cfg:
assert 'samples_per_gpu' not in \
cfg.data.test_dataloader, ('`samples_per_gpu` are set '
'in `data.test` field and ` '
'data.test_dataloader` at'
' the same time. '
'Please only set it in '
'`data.test_dataloader`. ')
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
cfg.data.test_dataloader['samples_per_gpu'] = samples_per_gpu
return cfg
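# Minimal usage sketch (illustrative only, not part of the original module):
# a legacy-style config using `imgs_per_gpu` and `total_epochs` is converted
# into the expected `samples_per_gpu` / `runner` fields.
if __name__ == '__main__':
    legacy_cfg = ConfigDict(
        dict(
            total_epochs=12,
            data=dict(
                imgs_per_gpu=2,
                workers_per_gpu=2,
                val=dict(),
                test=dict())))
    new_cfg = compat_cfg(legacy_cfg)
    # expected output: 2 12
    print(new_cfg.data.train_dataloader.samples_per_gpu,
          new_cfg.runner.max_epochs)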
| 5,977 | 41.7 | 79 |
py
|
ERD
|
ERD-main/mmdet/utils/split_batch.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def split_batch(img, img_metas, kwargs):
"""Split data_batch by tags.
Code is modified from
<https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/structure_utils.py> # noqa: E501
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys, see
:class:`mmdet.datasets.pipelines.Collect`.
kwargs (dict): Specific to concrete implementation.
Returns:
        data_groups (dict): a dict of data_batch split by tags,
such as 'sup', 'unsup_teacher', and 'unsup_student'.
"""
# only stack img in the batch
def fuse_list(obj_list, obj):
return torch.stack(obj_list) if isinstance(obj,
torch.Tensor) else obj_list
# select data with tag from data_batch
def select_group(data_batch, current_tag):
group_flag = [tag == current_tag for tag in data_batch['tag']]
return {
k: fuse_list([vv for vv, gf in zip(v, group_flag) if gf], v)
for k, v in data_batch.items()
}
kwargs.update({'img': img, 'img_metas': img_metas})
kwargs.update({'tag': [meta['tag'] for meta in img_metas]})
tags = list(set(kwargs['tag']))
data_groups = {tag: select_group(kwargs, tag) for tag in tags}
for tag, group in data_groups.items():
group.pop('tag')
return data_groups
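# Minimal usage sketch (illustrative only, not part of the original module):
# a batch of two images tagged 'sup' and 'unsup_teacher' is split into two
# groups, one per tag.
if __name__ == '__main__':
    imgs = torch.zeros(2, 3, 8, 8)
    metas = [dict(tag='sup'), dict(tag='unsup_teacher')]
    groups = split_batch(imgs, metas, {})
    print(sorted(groups.keys())) # ['sup', 'unsup_teacher']
    print(groups['sup']['img'].shape) # torch.Size([1, 3, 8, 8])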
| 1,778 | 37.673913 | 99 |
py
|
ERD
|
ERD-main/mmdet/structures/det_data_sample.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional
from mmengine.structures import BaseDataElement, InstanceData, PixelData
class DetDataSample(BaseDataElement):
"""A data structure interface of MMDetection. They are used as interfaces
between different components.
The attributes in ``DetDataSample`` are divided into several parts:
- ``proposals``(InstanceData): Region proposals used in two-stage
detectors.
- ``gt_instances``(InstanceData): Ground truth of instance annotations.
- ``pred_instances``(InstanceData): Instances of model predictions.
- ``ignored_instances``(InstanceData): Instances to be ignored during
training/testing.
- ``gt_panoptic_seg``(PixelData): Ground truth of panoptic
segmentation.
- ``pred_panoptic_seg``(PixelData): Prediction of panoptic
segmentation.
- ``gt_sem_seg``(PixelData): Ground truth of semantic segmentation.
- ``pred_sem_seg``(PixelData): Prediction of semantic segmentation.
Examples:
>>> import torch
>>> import numpy as np
>>> from mmengine.structures import InstanceData
>>> from mmdet.structures import DetDataSample
>>> data_sample = DetDataSample()
>>> img_meta = dict(img_shape=(800, 1196),
... pad_shape=(800, 1216))
>>> gt_instances = InstanceData(metainfo=img_meta)
>>> gt_instances.bboxes = torch.rand((5, 4))
>>> gt_instances.labels = torch.rand((5,))
>>> data_sample.gt_instances = gt_instances
>>> assert 'img_shape' in data_sample.gt_instances.metainfo_keys()
>>> len(data_sample.gt_instances)
5
>>> print(data_sample)
<DetDataSample(
META INFORMATION
DATA FIELDS
gt_instances: <InstanceData(
META INFORMATION
pad_shape: (800, 1216)
img_shape: (800, 1196)
DATA FIELDS
labels: tensor([0.8533, 0.1550, 0.5433, 0.7294, 0.5098])
bboxes:
tensor([[9.7725e-01, 5.8417e-01, 1.7269e-01, 6.5694e-01],
[1.7894e-01, 5.1780e-01, 7.0590e-01, 4.8589e-01],
[7.0392e-01, 6.6770e-01, 1.7520e-01, 1.4267e-01],
[2.2411e-01, 5.1962e-01, 9.6953e-01, 6.6994e-01],
[4.1338e-01, 2.1165e-01, 2.7239e-04, 6.8477e-01]])
) at 0x7f21fb1b9190>
) at 0x7f21fb1b9880>
>>> pred_instances = InstanceData(metainfo=img_meta)
>>> pred_instances.bboxes = torch.rand((5, 4))
>>> pred_instances.scores = torch.rand((5,))
>>> data_sample = DetDataSample(pred_instances=pred_instances)
>>> assert 'pred_instances' in data_sample
>>> data_sample = DetDataSample()
>>> gt_instances_data = dict(
... bboxes=torch.rand(2, 4),
... labels=torch.rand(2),
... masks=np.random.rand(2, 2, 2))
>>> gt_instances = InstanceData(**gt_instances_data)
>>> data_sample.gt_instances = gt_instances
>>> assert 'gt_instances' in data_sample
>>> assert 'masks' in data_sample.gt_instances
>>> data_sample = DetDataSample()
>>> gt_panoptic_seg_data = dict(panoptic_seg=torch.rand(2, 4))
>>> gt_panoptic_seg = PixelData(**gt_panoptic_seg_data)
>>> data_sample.gt_panoptic_seg = gt_panoptic_seg
>>> print(data_sample)
<DetDataSample(
META INFORMATION
DATA FIELDS
_gt_panoptic_seg: <BaseDataElement(
META INFORMATION
DATA FIELDS
panoptic_seg: tensor([[0.7586, 0.1262, 0.2892, 0.9341],
[0.3200, 0.7448, 0.1052, 0.5371]])
) at 0x7f66c2bb7730>
gt_panoptic_seg: <BaseDataElement(
META INFORMATION
DATA FIELDS
panoptic_seg: tensor([[0.7586, 0.1262, 0.2892, 0.9341],
[0.3200, 0.7448, 0.1052, 0.5371]])
) at 0x7f66c2bb7730>
) at 0x7f66c2bb7280>
>>> data_sample = DetDataSample()
>>> gt_segm_seg_data = dict(segm_seg=torch.rand(2, 2, 2))
>>> gt_segm_seg = PixelData(**gt_segm_seg_data)
>>> data_sample.gt_segm_seg = gt_segm_seg
>>> assert 'gt_segm_seg' in data_sample
>>> assert 'segm_seg' in data_sample.gt_segm_seg
"""
@property
def proposals(self) -> InstanceData:
return self._proposals
@proposals.setter
def proposals(self, value: InstanceData):
self.set_field(value, '_proposals', dtype=InstanceData)
@proposals.deleter
def proposals(self):
del self._proposals
@property
def gt_instances(self) -> InstanceData:
return self._gt_instances
@gt_instances.setter
def gt_instances(self, value: InstanceData):
self.set_field(value, '_gt_instances', dtype=InstanceData)
@gt_instances.deleter
def gt_instances(self):
del self._gt_instances
@property
def pred_instances(self) -> InstanceData:
return self._pred_instances
@pred_instances.setter
def pred_instances(self, value: InstanceData):
self.set_field(value, '_pred_instances', dtype=InstanceData)
@pred_instances.deleter
def pred_instances(self):
del self._pred_instances
@property
def ignored_instances(self) -> InstanceData:
return self._ignored_instances
@ignored_instances.setter
def ignored_instances(self, value: InstanceData):
self.set_field(value, '_ignored_instances', dtype=InstanceData)
@ignored_instances.deleter
def ignored_instances(self):
del self._ignored_instances
@property
def gt_panoptic_seg(self) -> PixelData:
return self._gt_panoptic_seg
@gt_panoptic_seg.setter
def gt_panoptic_seg(self, value: PixelData):
self.set_field(value, '_gt_panoptic_seg', dtype=PixelData)
@gt_panoptic_seg.deleter
def gt_panoptic_seg(self):
del self._gt_panoptic_seg
@property
def pred_panoptic_seg(self) -> PixelData:
return self._pred_panoptic_seg
@pred_panoptic_seg.setter
def pred_panoptic_seg(self, value: PixelData):
self.set_field(value, '_pred_panoptic_seg', dtype=PixelData)
@pred_panoptic_seg.deleter
def pred_panoptic_seg(self):
del self._pred_panoptic_seg
@property
def gt_sem_seg(self) -> PixelData:
return self._gt_sem_seg
@gt_sem_seg.setter
def gt_sem_seg(self, value: PixelData):
self.set_field(value, '_gt_sem_seg', dtype=PixelData)
@gt_sem_seg.deleter
def gt_sem_seg(self):
del self._gt_sem_seg
@property
def pred_sem_seg(self) -> PixelData:
return self._pred_sem_seg
@pred_sem_seg.setter
def pred_sem_seg(self, value: PixelData):
self.set_field(value, '_pred_sem_seg', dtype=PixelData)
@pred_sem_seg.deleter
def pred_sem_seg(self):
del self._pred_sem_seg
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
| 7,409 | 33.626168 | 79 |
py
|
ERD
|
ERD-main/mmdet/structures/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .det_data_sample import DetDataSample, OptSampleList, SampleList
__all__ = ['DetDataSample', 'SampleList', 'OptSampleList']
| 178 | 34.8 | 69 |
py
|
ERD
|
ERD-main/mmdet/structures/mask/structures.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from abc import ABCMeta, abstractmethod
from typing import Sequence, Type, TypeVar
import cv2
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import shapely.geometry as geometry
import torch
from mmcv.ops.roi_align import roi_align
T = TypeVar('T')
class BaseInstanceMasks(metaclass=ABCMeta):
"""Base class for instance masks."""
@abstractmethod
def rescale(self, scale, interpolation='nearest'):
"""Rescale masks as large as possible while keeping the aspect ratio.
        For details, refer to `mmcv.imrescale`.
Args:
scale (tuple[int]): The maximum size (h, w) of rescaled mask.
interpolation (str): Same as :func:`mmcv.imrescale`.
Returns:
BaseInstanceMasks: The rescaled masks.
"""
@abstractmethod
def resize(self, out_shape, interpolation='nearest'):
"""Resize masks to the given out_shape.
Args:
out_shape: Target (h, w) of resized mask.
interpolation (str): See :func:`mmcv.imresize`.
Returns:
BaseInstanceMasks: The resized masks.
"""
@abstractmethod
def flip(self, flip_direction='horizontal'):
"""Flip masks alone the given direction.
Args:
flip_direction (str): Either 'horizontal' or 'vertical'.
Returns:
BaseInstanceMasks: The flipped masks.
"""
@abstractmethod
def pad(self, out_shape, pad_val):
"""Pad masks to the given size of (h, w).
Args:
out_shape (tuple[int]): Target (h, w) of padded mask.
pad_val (int): The padded value.
Returns:
BaseInstanceMasks: The padded masks.
"""
@abstractmethod
def crop(self, bbox):
"""Crop each mask by the given bbox.
Args:
bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).
Return:
BaseInstanceMasks: The cropped masks.
"""
@abstractmethod
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device,
interpolation='bilinear',
binarize=True):
"""Crop and resize masks by the given bboxes.
This function is mainly used in mask targets computation.
        It first aligns masks to bboxes by assigned_inds, then crops each mask
        by the assigned bbox and resizes it to the size of (mask_h, mask_w).
Args:
bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
out_shape (tuple[int]): Target (h, w) of resized mask
inds (ndarray): Indexes to assign masks to each bbox,
shape (N,) and values should be between [0, num_masks - 1].
device (str): Device of bboxes
interpolation (str): See `mmcv.imresize`
binarize (bool): if True fractional values are rounded to 0 or 1
after the resize operation. if False and unsupported an error
will be raised. Defaults to True.
Return:
BaseInstanceMasks: the cropped and resized masks.
"""
@abstractmethod
def expand(self, expanded_h, expanded_w, top, left):
"""see :class:`Expand`."""
@property
@abstractmethod
def areas(self):
"""ndarray: areas of each instance."""
@abstractmethod
def to_ndarray(self):
"""Convert masks to the format of ndarray.
Return:
ndarray: Converted masks in the format of ndarray.
"""
@abstractmethod
def to_tensor(self, dtype, device):
"""Convert masks to the format of Tensor.
Args:
dtype (str): Dtype of converted mask.
device (torch.device): Device of converted masks.
Returns:
Tensor: Converted masks in the format of Tensor.
"""
@abstractmethod
def translate(self,
out_shape,
offset,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""Translate the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
offset (int | float): The offset for translate.
direction (str): The translate direction, either "horizontal"
or "vertical".
border_value (int | float): Border value. Default 0.
interpolation (str): Same as :func:`mmcv.imtranslate`.
Returns:
Translated masks.
"""
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""Shear the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
magnitude (int | float): The magnitude used for shear.
direction (str): The shear direction, either "horizontal"
or "vertical".
border_value (int | tuple[int]): Value used in case of a
constant border. Default 0.
interpolation (str): Same as in :func:`mmcv.imshear`.
Returns:
ndarray: Sheared masks.
"""
@abstractmethod
def rotate(self, out_shape, angle, center=None, scale=1.0, border_value=0):
"""Rotate the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
angle (int | float): Rotation angle in degrees. Positive values
mean counter-clockwise rotation.
center (tuple[float], optional): Center point (w, h) of the
rotation in source image. If not specified, the center of
the image will be used.
scale (int | float): Isotropic scale factor.
border_value (int | float): Border value. Default 0 for masks.
Returns:
Rotated masks.
"""
def get_bboxes(self, dst_type='hbb'):
"""Get the certain type boxes from masks.
Please refer to ``mmdet.structures.bbox.box_type`` for more details of
the box type.
Args:
dst_type: Destination box type.
Returns:
:obj:`BaseBoxes`: Certain type boxes.
"""
from ..bbox import get_box_type
_, box_type_cls = get_box_type(dst_type)
return box_type_cls.from_instance_masks(self)
@classmethod
@abstractmethod
def cat(cls: Type[T], masks: Sequence[T]) -> T:
"""Concatenate a sequence of masks into one single mask instance.
Args:
masks (Sequence[T]): A sequence of mask instances.
Returns:
T: Concatenated mask instance.
"""
class BitmapMasks(BaseInstanceMasks):
"""This class represents masks in the form of bitmaps.
Args:
masks (ndarray): ndarray of masks in shape (N, H, W), where N is
the number of objects.
height (int): height of masks
width (int): width of masks
Example:
        >>> from mmdet.structures.mask.structures import * # NOQA
>>> num_masks, H, W = 3, 32, 32
>>> rng = np.random.RandomState(0)
>>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int64)
>>> self = BitmapMasks(masks, height=H, width=W)
>>> # demo crop_and_resize
>>> num_boxes = 5
>>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
>>> out_shape = (14, 14)
>>> inds = torch.randint(0, len(self), size=(num_boxes,))
>>> device = 'cpu'
>>> interpolation = 'bilinear'
>>> new = self.crop_and_resize(
... bboxes, out_shape, inds, device, interpolation)
>>> assert len(new) == num_boxes
>>> assert new.height, new.width == out_shape
"""
def __init__(self, masks, height, width):
self.height = height
self.width = width
if len(masks) == 0:
self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
else:
assert isinstance(masks, (list, np.ndarray))
if isinstance(masks, list):
assert isinstance(masks[0], np.ndarray)
assert masks[0].ndim == 2 # (H, W)
else:
assert masks.ndim == 3 # (N, H, W)
self.masks = np.stack(masks).reshape(-1, height, width)
assert self.masks.shape[1] == self.height
assert self.masks.shape[2] == self.width
def __getitem__(self, index):
"""Index the BitmapMask.
Args:
index (int | ndarray): Indices in the format of integer or ndarray.
Returns:
:obj:`BitmapMasks`: Indexed bitmap masks.
"""
masks = self.masks[index].reshape(-1, self.height, self.width)
return BitmapMasks(masks, self.height, self.width)
def __iter__(self):
return iter(self.masks)
def __repr__(self):
s = self.__class__.__name__ + '('
s += f'num_masks={len(self.masks)}, '
s += f'height={self.height}, '
s += f'width={self.width})'
return s
def __len__(self):
"""Number of masks."""
return len(self.masks)
def rescale(self, scale, interpolation='nearest'):
"""See :func:`BaseInstanceMasks.rescale`."""
if len(self.masks) == 0:
new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)
else:
rescaled_masks = np.stack([
mmcv.imrescale(mask, scale, interpolation=interpolation)
for mask in self.masks
])
height, width = rescaled_masks.shape[1:]
return BitmapMasks(rescaled_masks, height, width)
def resize(self, out_shape, interpolation='nearest'):
"""See :func:`BaseInstanceMasks.resize`."""
if len(self.masks) == 0:
resized_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
resized_masks = np.stack([
mmcv.imresize(
mask, out_shape[::-1], interpolation=interpolation)
for mask in self.masks
])
return BitmapMasks(resized_masks, *out_shape)
def flip(self, flip_direction='horizontal'):
"""See :func:`BaseInstanceMasks.flip`."""
assert flip_direction in ('horizontal', 'vertical', 'diagonal')
if len(self.masks) == 0:
flipped_masks = self.masks
else:
flipped_masks = np.stack([
mmcv.imflip(mask, direction=flip_direction)
for mask in self.masks
])
return BitmapMasks(flipped_masks, self.height, self.width)
def pad(self, out_shape, pad_val=0):
"""See :func:`BaseInstanceMasks.pad`."""
if len(self.masks) == 0:
padded_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
padded_masks = np.stack([
mmcv.impad(mask, shape=out_shape, pad_val=pad_val)
for mask in self.masks
])
return BitmapMasks(padded_masks, *out_shape)
def crop(self, bbox):
"""See :func:`BaseInstanceMasks.crop`."""
assert isinstance(bbox, np.ndarray)
assert bbox.ndim == 1
# clip the boundary
bbox = bbox.copy()
bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
if len(self.masks) == 0:
cropped_masks = np.empty((0, h, w), dtype=np.uint8)
else:
cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
return BitmapMasks(cropped_masks, h, w)
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device='cpu',
interpolation='bilinear',
binarize=True):
"""See :func:`BaseInstanceMasks.crop_and_resize`."""
if len(self.masks) == 0:
empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
return BitmapMasks(empty_masks, *out_shape)
# convert bboxes to tensor
if isinstance(bboxes, np.ndarray):
bboxes = torch.from_numpy(bboxes).to(device=device)
if isinstance(inds, np.ndarray):
inds = torch.from_numpy(inds).to(device=device)
num_bbox = bboxes.shape[0]
fake_inds = torch.arange(
num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5
rois = rois.to(device=device)
if num_bbox > 0:
gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(
0, inds).to(dtype=rois.dtype)
targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
1.0, 0, 'avg', True).squeeze(1)
if binarize:
resized_masks = (targets >= 0.5).cpu().numpy()
else:
resized_masks = targets.cpu().numpy()
else:
resized_masks = []
return BitmapMasks(resized_masks, *out_shape)
def expand(self, expanded_h, expanded_w, top, left):
"""See :func:`BaseInstanceMasks.expand`."""
if len(self.masks) == 0:
expanded_mask = np.empty((0, expanded_h, expanded_w),
dtype=np.uint8)
else:
expanded_mask = np.zeros((len(self), expanded_h, expanded_w),
dtype=np.uint8)
expanded_mask[:, top:top + self.height,
left:left + self.width] = self.masks
return BitmapMasks(expanded_mask, expanded_h, expanded_w)
def translate(self,
out_shape,
offset,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""Translate the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
offset (int | float): The offset for translate.
direction (str): The translate direction, either "horizontal"
or "vertical".
border_value (int | float): Border value. Default 0 for masks.
interpolation (str): Same as :func:`mmcv.imtranslate`.
Returns:
BitmapMasks: Translated BitmapMasks.
Example:
            >>> from mmdet.structures.mask.structures import BitmapMasks
>>> self = BitmapMasks.random(dtype=np.uint8)
>>> out_shape = (32, 32)
>>> offset = 4
>>> direction = 'horizontal'
>>> border_value = 0
>>> interpolation = 'bilinear'
>>> # Note, There seem to be issues when:
>>> # * the mask dtype is not supported by cv2.AffineWarp
>>> new = self.translate(out_shape, offset, direction,
>>> border_value, interpolation)
>>> assert len(new) == len(self)
>>> assert new.height, new.width == out_shape
"""
if len(self.masks) == 0:
translated_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
masks = self.masks
if masks.shape[-2:] != out_shape:
empty_masks = np.zeros((masks.shape[0], *out_shape),
dtype=masks.dtype)
min_h = min(out_shape[0], masks.shape[1])
min_w = min(out_shape[1], masks.shape[2])
empty_masks[:, :min_h, :min_w] = masks[:, :min_h, :min_w]
masks = empty_masks
translated_masks = mmcv.imtranslate(
masks.transpose((1, 2, 0)),
offset,
direction,
border_value=border_value,
interpolation=interpolation)
if translated_masks.ndim == 2:
translated_masks = translated_masks[:, :, None]
translated_masks = translated_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(translated_masks, *out_shape)
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""Shear the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
magnitude (int | float): The magnitude used for shear.
direction (str): The shear direction, either "horizontal"
or "vertical".
border_value (int | tuple[int]): Value used in case of a
constant border.
interpolation (str): Same as in :func:`mmcv.imshear`.
Returns:
BitmapMasks: The sheared masks.
"""
if len(self.masks) == 0:
sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
sheared_masks = mmcv.imshear(
self.masks.transpose((1, 2, 0)),
magnitude,
direction,
border_value=border_value,
interpolation=interpolation)
if sheared_masks.ndim == 2:
sheared_masks = sheared_masks[:, :, None]
sheared_masks = sheared_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(sheared_masks, *out_shape)
def rotate(self,
out_shape,
angle,
center=None,
scale=1.0,
border_value=0,
interpolation='bilinear'):
"""Rotate the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
angle (int | float): Rotation angle in degrees. Positive values
mean counter-clockwise rotation.
center (tuple[float], optional): Center point (w, h) of the
rotation in source image. If not specified, the center of
the image will be used.
scale (int | float): Isotropic scale factor.
border_value (int | float): Border value. Default 0 for masks.
interpolation (str): Same as in :func:`mmcv.imrotate`.
Returns:
BitmapMasks: Rotated BitmapMasks.
"""
if len(self.masks) == 0:
rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)
else:
rotated_masks = mmcv.imrotate(
self.masks.transpose((1, 2, 0)),
angle,
center=center,
scale=scale,
border_value=border_value,
interpolation=interpolation)
if rotated_masks.ndim == 2:
# case when only one mask, (h, w)
rotated_masks = rotated_masks[:, :, None] # (h, w, 1)
rotated_masks = rotated_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(rotated_masks, *out_shape)
@property
def areas(self):
"""See :py:attr:`BaseInstanceMasks.areas`."""
return self.masks.sum((1, 2))
def to_ndarray(self):
"""See :func:`BaseInstanceMasks.to_ndarray`."""
return self.masks
def to_tensor(self, dtype, device):
"""See :func:`BaseInstanceMasks.to_tensor`."""
return torch.tensor(self.masks, dtype=dtype, device=device)
@classmethod
def random(cls,
num_masks=3,
height=32,
width=32,
dtype=np.uint8,
rng=None):
"""Generate random bitmap masks for demo / testing purposes.
Example:
            >>> from mmdet.structures.mask.structures import BitmapMasks
>>> self = BitmapMasks.random()
>>> print('self = {}'.format(self))
self = BitmapMasks(num_masks=3, height=32, width=32)
"""
from mmdet.utils.util_random import ensure_rng
rng = ensure_rng(rng)
masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype)
self = cls(masks, height=height, width=width)
return self
@classmethod
def cat(cls: Type[T], masks: Sequence[T]) -> T:
"""Concatenate a sequence of masks into one single mask instance.
Args:
masks (Sequence[BitmapMasks]): A sequence of mask instances.
Returns:
BitmapMasks: Concatenated mask instance.
"""
assert isinstance(masks, Sequence)
if len(masks) == 0:
raise ValueError('masks should not be an empty list.')
assert all(isinstance(m, cls) for m in masks)
mask_array = np.concatenate([m.masks for m in masks], axis=0)
return cls(mask_array, *mask_array.shape[1:])
class PolygonMasks(BaseInstanceMasks):
"""This class represents masks in the form of polygons.
    Polygons are stored as a three-level list. The first level of the list
    corresponds to objects, the second level to the polygons that compose
    each object, and the third level to the polygon coordinates.
Args:
masks (list[list[ndarray]]): The first level of the list
corresponds to objects, the second level to the polys that
compose the object, the third level to the poly coordinates
height (int): height of masks
width (int): width of masks
Example:
        >>> from mmdet.structures.mask.structures import * # NOQA
>>> masks = [
>>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ]
>>> ]
>>> height, width = 16, 16
>>> self = PolygonMasks(masks, height, width)
>>> # demo translate
>>> new = self.translate((16, 16), 4., direction='horizontal')
>>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2])
>>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4)
>>> # demo crop_and_resize
>>> num_boxes = 3
>>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
>>> out_shape = (16, 16)
>>> inds = torch.randint(0, len(self), size=(num_boxes,))
>>> device = 'cpu'
>>> interpolation = 'bilinear'
>>> new = self.crop_and_resize(
... bboxes, out_shape, inds, device, interpolation)
>>> assert len(new) == num_boxes
>>> assert new.height, new.width == out_shape
"""
def __init__(self, masks, height, width):
assert isinstance(masks, list)
if len(masks) > 0:
assert isinstance(masks[0], list)
assert isinstance(masks[0][0], np.ndarray)
self.height = height
self.width = width
self.masks = masks
def __getitem__(self, index):
"""Index the polygon masks.
Args:
index (ndarray | List): The indices.
Returns:
:obj:`PolygonMasks`: The indexed polygon masks.
"""
if isinstance(index, np.ndarray):
if index.dtype == bool:
index = np.where(index)[0].tolist()
else:
index = index.tolist()
if isinstance(index, list):
masks = [self.masks[i] for i in index]
else:
try:
masks = self.masks[index]
except Exception:
raise ValueError(
f'Unsupported input of type {type(index)} for indexing!')
if len(masks) and isinstance(masks[0], np.ndarray):
masks = [masks] # ensure a list of three levels
return PolygonMasks(masks, self.height, self.width)
def __iter__(self):
return iter(self.masks)
def __repr__(self):
s = self.__class__.__name__ + '('
s += f'num_masks={len(self.masks)}, '
s += f'height={self.height}, '
s += f'width={self.width})'
return s
def __len__(self):
"""Number of masks."""
return len(self.masks)
def rescale(self, scale, interpolation=None):
"""see :func:`BaseInstanceMasks.rescale`"""
new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
if len(self.masks) == 0:
rescaled_masks = PolygonMasks([], new_h, new_w)
else:
rescaled_masks = self.resize((new_h, new_w))
return rescaled_masks
def resize(self, out_shape, interpolation=None):
"""see :func:`BaseInstanceMasks.resize`"""
if len(self.masks) == 0:
resized_masks = PolygonMasks([], *out_shape)
else:
h_scale = out_shape[0] / self.height
w_scale = out_shape[1] / self.width
resized_masks = []
for poly_per_obj in self.masks:
resized_poly = []
for p in poly_per_obj:
p = p.copy()
p[0::2] = p[0::2] * w_scale
p[1::2] = p[1::2] * h_scale
resized_poly.append(p)
resized_masks.append(resized_poly)
resized_masks = PolygonMasks(resized_masks, *out_shape)
return resized_masks
def flip(self, flip_direction='horizontal'):
"""see :func:`BaseInstanceMasks.flip`"""
assert flip_direction in ('horizontal', 'vertical', 'diagonal')
if len(self.masks) == 0:
flipped_masks = PolygonMasks([], self.height, self.width)
else:
flipped_masks = []
for poly_per_obj in self.masks:
flipped_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
if flip_direction == 'horizontal':
p[0::2] = self.width - p[0::2]
elif flip_direction == 'vertical':
p[1::2] = self.height - p[1::2]
else:
p[0::2] = self.width - p[0::2]
p[1::2] = self.height - p[1::2]
flipped_poly_per_obj.append(p)
flipped_masks.append(flipped_poly_per_obj)
flipped_masks = PolygonMasks(flipped_masks, self.height,
self.width)
return flipped_masks
def crop(self, bbox):
"""see :func:`BaseInstanceMasks.crop`"""
assert isinstance(bbox, np.ndarray)
assert bbox.ndim == 1
# clip the boundary
bbox = bbox.copy()
bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
if len(self.masks) == 0:
cropped_masks = PolygonMasks([], h, w)
else:
# reference: https://github.com/facebookresearch/fvcore/blob/main/fvcore/transforms/transform.py # noqa
crop_box = geometry.box(x1, y1, x2, y2).buffer(0.0)
cropped_masks = []
            # suppress shapely warnings until it incorporates GEOS>=3.11.2
# reference: https://github.com/shapely/shapely/issues/1345
initial_settings = np.seterr()
np.seterr(invalid='ignore')
for poly_per_obj in self.masks:
cropped_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
p = geometry.Polygon(p.reshape(-1, 2)).buffer(0.0)
# polygon must be valid to perform intersection.
if not p.is_valid:
continue
cropped = p.intersection(crop_box)
if cropped.is_empty:
continue
if isinstance(cropped,
geometry.collection.BaseMultipartGeometry):
cropped = cropped.geoms
else:
cropped = [cropped]
# one polygon may be cropped to multiple ones
for poly in cropped:
# ignore lines or points
if not isinstance(
poly, geometry.Polygon) or not poly.is_valid:
continue
coords = np.asarray(poly.exterior.coords)
# remove an extra identical vertex at the end
coords = coords[:-1]
coords[:, 0] -= x1
coords[:, 1] -= y1
cropped_poly_per_obj.append(coords.reshape(-1))
# a dummy polygon to avoid misalignment between masks and boxes
if len(cropped_poly_per_obj) == 0:
cropped_poly_per_obj = [np.array([0, 0, 0, 0, 0, 0])]
cropped_masks.append(cropped_poly_per_obj)
np.seterr(**initial_settings)
cropped_masks = PolygonMasks(cropped_masks, h, w)
return cropped_masks
def pad(self, out_shape, pad_val=0):
"""padding has no effect on polygons`"""
return PolygonMasks(self.masks, *out_shape)
def expand(self, *args, **kwargs):
"""TODO: Add expand for polygon"""
raise NotImplementedError
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device='cpu',
interpolation='bilinear',
binarize=True):
"""see :func:`BaseInstanceMasks.crop_and_resize`"""
out_h, out_w = out_shape
if len(self.masks) == 0:
return PolygonMasks([], out_h, out_w)
if not binarize:
raise ValueError('Polygons are always binary, '
'setting binarize=False is unsupported')
resized_masks = []
for i in range(len(bboxes)):
mask = self.masks[inds[i]]
bbox = bboxes[i, :]
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
h_scale = out_h / max(h, 0.1) # avoid too large scale
w_scale = out_w / max(w, 0.1)
resized_mask = []
for p in mask:
p = p.copy()
# crop
# pycocotools will clip the boundary
p[0::2] = p[0::2] - bbox[0]
p[1::2] = p[1::2] - bbox[1]
# resize
p[0::2] = p[0::2] * w_scale
p[1::2] = p[1::2] * h_scale
resized_mask.append(p)
resized_masks.append(resized_mask)
return PolygonMasks(resized_masks, *out_shape)
def translate(self,
out_shape,
offset,
direction='horizontal',
border_value=None,
interpolation=None):
"""Translate the PolygonMasks.
Example:
>>> self = PolygonMasks.random(dtype=np.int64)
>>> out_shape = (self.height, self.width)
>>> new = self.translate(out_shape, 4., direction='horizontal')
>>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])
>>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501
"""
assert border_value is None or border_value == 0, \
'Here border_value is not '\
            f'used, and by default should be None or 0, got {border_value}.'
if len(self.masks) == 0:
translated_masks = PolygonMasks([], *out_shape)
else:
translated_masks = []
for poly_per_obj in self.masks:
translated_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
if direction == 'horizontal':
p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
elif direction == 'vertical':
p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
translated_poly_per_obj.append(p)
translated_masks.append(translated_poly_per_obj)
translated_masks = PolygonMasks(translated_masks, *out_shape)
return translated_masks
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""See :func:`BaseInstanceMasks.shear`."""
if len(self.masks) == 0:
sheared_masks = PolygonMasks([], *out_shape)
else:
sheared_masks = []
if direction == 'horizontal':
shear_matrix = np.stack([[1, magnitude],
[0, 1]]).astype(np.float32)
elif direction == 'vertical':
shear_matrix = np.stack([[1, 0], [magnitude,
1]]).astype(np.float32)
for poly_per_obj in self.masks:
sheared_poly = []
for p in poly_per_obj:
p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n]
new_coords = np.matmul(shear_matrix, p) # [2, n]
new_coords[0, :] = np.clip(new_coords[0, :], 0,
out_shape[1])
new_coords[1, :] = np.clip(new_coords[1, :], 0,
out_shape[0])
sheared_poly.append(
new_coords.transpose((1, 0)).reshape(-1))
sheared_masks.append(sheared_poly)
sheared_masks = PolygonMasks(sheared_masks, *out_shape)
return sheared_masks
def rotate(self,
out_shape,
angle,
center=None,
scale=1.0,
border_value=0,
interpolation='bilinear'):
"""See :func:`BaseInstanceMasks.rotate`."""
if len(self.masks) == 0:
rotated_masks = PolygonMasks([], *out_shape)
else:
rotated_masks = []
rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)
for poly_per_obj in self.masks:
rotated_poly = []
for p in poly_per_obj:
p = p.copy()
coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2]
# pad 1 to convert from format [x, y] to homogeneous
# coordinates format [x, y, 1]
coords = np.concatenate(
(coords, np.ones((coords.shape[0], 1), coords.dtype)),
axis=1) # [n, 3]
rotated_coords = np.matmul(
rotate_matrix[None, :, :],
coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2]
rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,
out_shape[1])
rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,
out_shape[0])
rotated_poly.append(rotated_coords.reshape(-1))
rotated_masks.append(rotated_poly)
rotated_masks = PolygonMasks(rotated_masks, *out_shape)
return rotated_masks
def to_bitmap(self):
"""convert polygon masks to bitmap masks."""
bitmap_masks = self.to_ndarray()
return BitmapMasks(bitmap_masks, self.height, self.width)
@property
def areas(self):
"""Compute areas of masks.
This func is modified from `detectron2
<https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.
The function only works with Polygons using the shoelace formula.
Return:
ndarray: areas of each instance
""" # noqa: W501
area = []
for polygons_per_obj in self.masks:
area_per_obj = 0
for p in polygons_per_obj:
area_per_obj += self._polygon_area(p[0::2], p[1::2])
area.append(area_per_obj)
return np.asarray(area)
def _polygon_area(self, x, y):
"""Compute the area of a component of a polygon.
Using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Args:
x (ndarray): x coordinates of the component
y (ndarray): y coordinates of the component
Return:
            float: the area of the component
""" # noqa: 501
return 0.5 * np.abs(
np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def to_ndarray(self):
"""Convert masks to the format of ndarray."""
if len(self.masks) == 0:
return np.empty((0, self.height, self.width), dtype=np.uint8)
bitmap_masks = []
for poly_per_obj in self.masks:
bitmap_masks.append(
polygon_to_bitmap(poly_per_obj, self.height, self.width))
return np.stack(bitmap_masks)
def to_tensor(self, dtype, device):
"""See :func:`BaseInstanceMasks.to_tensor`."""
if len(self.masks) == 0:
return torch.empty((0, self.height, self.width),
dtype=dtype,
device=device)
ndarray_masks = self.to_ndarray()
return torch.tensor(ndarray_masks, dtype=dtype, device=device)
@classmethod
def random(cls,
num_masks=3,
height=32,
width=32,
n_verts=5,
dtype=np.float32,
rng=None):
"""Generate random polygon masks for demo / testing purposes.
Adapted from [1]_
References:
.. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501
Example:
>>> from mmdet.data_elements.mask.structures import PolygonMasks
>>> self = PolygonMasks.random()
>>> print('self = {}'.format(self))
"""
from mmdet.utils.util_random import ensure_rng
rng = ensure_rng(rng)
def _gen_polygon(n, irregularity, spikeyness):
"""Creates the polygon by sampling points on a circle around the
centre. Random noise is added by varying the angular spacing
between sequential points, and by varying the radial distance of
each point from the centre.
Based on original code by Mike Ounsworth
Args:
n (int): number of vertices
irregularity (float): [0,1] indicating how much variance there
is in the angular spacing of vertices. [0,1] will map to
[0, 2pi/numberOfVerts]
spikeyness (float): [0,1] indicating how much variance there is
in each vertex from the circle of radius aveRadius. [0,1]
will map to [0, aveRadius]
Returns:
a list of vertices, in CCW order.
"""
from scipy.stats import truncnorm
# Generate around the unit circle
cx, cy = (0.0, 0.0)
radius = 1
tau = np.pi * 2
irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n
spikeyness = np.clip(spikeyness, 1e-9, 1)
# generate n angle steps
lower = (tau / n) - irregularity
upper = (tau / n) + irregularity
angle_steps = rng.uniform(lower, upper, n)
# normalize the steps so that point 0 and point n+1 are the same
k = angle_steps.sum() / (2 * np.pi)
angles = (angle_steps / k).cumsum() + rng.uniform(0, tau)
# Convert high and low values to be wrt the standard normal range
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html
low = 0
high = 2 * radius
mean = radius
std = spikeyness
a = (low - mean) / std
b = (high - mean) / std
tnorm = truncnorm(a=a, b=b, loc=mean, scale=std)
# now generate the points
radii = tnorm.rvs(n, random_state=rng)
x_pts = cx + radii * np.cos(angles)
y_pts = cy + radii * np.sin(angles)
points = np.hstack([x_pts[:, None], y_pts[:, None]])
# Scale to 0-1 space
points = points - points.min(axis=0)
points = points / points.max(axis=0)
# Randomly place within 0-1 space
points = points * (rng.rand() * .8 + .2)
min_pt = points.min(axis=0)
max_pt = points.max(axis=0)
high = (1 - max_pt)
low = (0 - min_pt)
offset = (rng.rand(2) * (high - low)) + low
points = points + offset
return points
def _order_vertices(verts):
"""
References:
https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise
"""
mlat = verts.T[0].sum() / len(verts)
mlng = verts.T[1].sum() / len(verts)
tau = np.pi * 2
angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) +
tau) % tau
sortx = angle.argsort()
verts = verts.take(sortx, axis=0)
return verts
# Generate a random exterior for each requested mask
masks = []
for _ in range(num_masks):
exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9))
exterior = (exterior * [(width, height)]).astype(dtype)
masks.append([exterior.ravel()])
self = cls(masks, height, width)
return self
@classmethod
def cat(cls: Type[T], masks: Sequence[T]) -> T:
"""Concatenate a sequence of masks into one single mask instance.
Args:
masks (Sequence[PolygonMasks]): A sequence of mask instances.
Returns:
PolygonMasks: Concatenated mask instance.
"""
assert isinstance(masks, Sequence)
if len(masks) == 0:
raise ValueError('masks should not be an empty list.')
assert all(isinstance(m, cls) for m in masks)
mask_list = list(itertools.chain(*[m.masks for m in masks]))
return cls(mask_list, masks[0].height, masks[0].width)
def polygon_to_bitmap(polygons, height, width):
"""Convert masks from the form of polygons to bitmaps.
Args:
polygons (list[ndarray]): masks in polygon representation
height (int): mask height
width (int): mask width
Return:
ndarray: the converted masks in bitmap representation
"""
rles = maskUtils.frPyObjects(polygons, height, width)
rle = maskUtils.merge(rles)
bitmap_mask = maskUtils.decode(rle).astype(bool)
return bitmap_mask
def bitmap_to_polygon(bitmap):
"""Convert masks from the form of bitmaps to polygons.
Args:
bitmap (ndarray): masks in bitmap representation.
Return:
list[ndarray]: the converted mask in polygon representation.
bool: whether the mask has holes.
"""
bitmap = np.ascontiguousarray(bitmap).astype(np.uint8)
# cv2.RETR_CCOMP: retrieves all of the contours and organizes them
# into a two-level hierarchy. At the top level, there are external
# boundaries of the components. At the second level, there are
# boundaries of the holes. If there is another contour inside a hole
# of a connected component, it is still put at the top level.
# cv2.CHAIN_APPROX_NONE: stores absolutely all the contour points.
outs = cv2.findContours(bitmap, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
contours = outs[-2]
hierarchy = outs[-1]
if hierarchy is None:
return [], False
# hierarchy[i]: 4 elements, for the indexes of next, previous,
# parent, or nested contours. If there is no corresponding contour,
# it will be -1.
with_hole = (hierarchy.reshape(-1, 4)[:, 3] >= 0).any()
contours = [c.reshape(-1, 2) for c in contours]
return contours, with_hole
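# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the two module-level helpers above: it
# rasterizes a triangle with ``polygon_to_bitmap`` and traces the result back
# with ``bitmap_to_polygon``. The triangle coordinates are arbitrary demo
# values, and the snippet reuses the numpy/cv2/pycocotools imports already
# made at the top of this file.
if __name__ == '__main__':
    # one polygon is a flat [x0, y0, x1, y1, ...] array, as used by PolygonMasks
    triangle = [np.array([10., 10., 50., 12., 30., 40.])]
    demo_bitmap = polygon_to_bitmap(triangle, height=64, width=64)
    print('foreground pixels:', int(demo_bitmap.sum()))
    demo_contours, demo_with_hole = bitmap_to_polygon(demo_bitmap)
    print('contours:', len(demo_contours), 'has hole:', demo_with_hole)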
| 44,891 | 36.59799 | 141 |
py
|
ERD
|
ERD-main/mmdet/structures/mask/mask_target.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch.nn.modules.utils import _pair
def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
cfg):
"""Compute mask target for positive proposals in multiple images.
Args:
pos_proposals_list (list[Tensor]): Positive proposals in multiple
images, each has shape (num_pos, 4).
pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each
positive proposals, each has shape (num_pos,).
gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of
each image.
cfg (dict): Config dict that specifies the mask size.
Returns:
Tensor: Mask target of each image, has shape (num_pos, w, h).
Example:
>>> from mmengine.config import Config
>>> import mmdet
>>> from mmdet.data_elements.mask import BitmapMasks
>>> from mmdet.data_elements.mask.mask_target import *
>>> H, W = 17, 18
>>> cfg = Config({'mask_size': (13, 14)})
>>> rng = np.random.RandomState(0)
>>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image
>>> pos_proposals_list = [
>>> torch.Tensor([
>>> [ 7.2425, 5.5929, 13.9414, 14.9541],
>>> [ 7.3241, 3.6170, 16.3850, 15.3102],
>>> ]),
>>> torch.Tensor([
>>> [ 4.8448, 6.4010, 7.0314, 9.7681],
>>> [ 5.9790, 2.6989, 7.4416, 4.8580],
>>> [ 0.0000, 0.0000, 0.1398, 9.8232],
>>> ]),
>>> ]
>>> # Corresponding class index for each proposal for each image
>>> pos_assigned_gt_inds_list = [
>>> torch.LongTensor([7, 0]),
>>> torch.LongTensor([5, 4, 1]),
>>> ]
>>> # Ground truth mask for each true object for each image
>>> gt_masks_list = [
>>> BitmapMasks(rng.rand(8, H, W), height=H, width=W),
>>> BitmapMasks(rng.rand(6, H, W), height=H, width=W),
>>> ]
>>> mask_targets = mask_target(
>>> pos_proposals_list, pos_assigned_gt_inds_list,
>>> gt_masks_list, cfg)
>>> assert mask_targets.shape == (5,) + cfg['mask_size']
"""
cfg_list = [cfg for _ in range(len(pos_proposals_list))]
mask_targets = map(mask_target_single, pos_proposals_list,
pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
mask_targets = list(mask_targets)
if len(mask_targets) > 0:
mask_targets = torch.cat(mask_targets)
return mask_targets
def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
"""Compute mask target for each positive proposal in the image.
Args:
pos_proposals (Tensor): Positive proposals.
pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.
gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
or Polygon.
        cfg (dict): Config dict that indicates the mask size.
Returns:
        Tensor: Mask target of each positive proposal in the image.
Example:
>>> from mmengine.config import Config
>>> import mmdet
>>> from mmdet.data_elements.mask import BitmapMasks
>>> from mmdet.data_elements.mask.mask_target import * # NOQA
>>> H, W = 32, 32
>>> cfg = Config({'mask_size': (7, 11)})
>>> rng = np.random.RandomState(0)
>>> # Masks for each ground truth box (relative to the image)
>>> gt_masks_data = rng.rand(3, H, W)
>>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)
>>> # Predicted positive boxes in one image
>>> pos_proposals = torch.FloatTensor([
>>> [ 16.2, 5.5, 19.9, 20.9],
>>> [ 17.3, 13.6, 19.3, 19.3],
>>> [ 14.8, 16.4, 17.0, 23.7],
>>> [ 0.0, 0.0, 16.0, 16.0],
>>> [ 4.0, 0.0, 20.0, 16.0],
>>> ])
>>> # For each predicted proposal, its assignment to a gt mask
>>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])
>>> mask_targets = mask_target_single(
>>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
>>> assert mask_targets.shape == (5,) + cfg['mask_size']
"""
device = pos_proposals.device
mask_size = _pair(cfg.mask_size)
binarize = not cfg.get('soft_mask_target', False)
num_pos = pos_proposals.size(0)
if num_pos > 0:
proposals_np = pos_proposals.cpu().numpy()
maxh, maxw = gt_masks.height, gt_masks.width
proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
mask_targets = gt_masks.crop_and_resize(
proposals_np,
mask_size,
device=device,
inds=pos_assigned_gt_inds,
binarize=binarize).to_ndarray()
mask_targets = torch.from_numpy(mask_targets).float().to(device)
else:
mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
return mask_targets
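# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged check of the empty-proposal branch of
# ``mask_target_single``: with no positive proposals the function
# short-circuits and returns an empty (0, *mask_size) tensor without touching
# the GT masks, so the demo stays independent of mmcv's RoIAlign op. It only
# assumes ``mmengine`` is installed, which this repo already requires.
if __name__ == '__main__':
    from mmengine.config import Config
    demo_cfg = Config({'mask_size': (7, 11)})
    empty_proposals = torch.empty((0, 4))
    empty_inds = torch.empty((0, ), dtype=torch.long)
    targets = mask_target_single(empty_proposals, empty_inds, None, demo_cfg)
    print(targets.shape)  # torch.Size([0, 7, 11])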
| 5,264 | 40.132813 | 78 |
py
|
ERD
|
ERD-main/mmdet/structures/mask/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pycocotools.mask as mask_util
import torch
from mmengine.utils import slice_list
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
    a 1-D array. In the dataset, all masks are concatenated into a single 1-D
tensor. Here we need to split the tensor into original representations.
Args:
polys (list): a list (length = image num) of 1-D tensors
poly_lens (list): a list (length = image num) of poly length
polys_per_mask (list): a list (length = image num) of poly number
of each mask
Returns:
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
polys_single = polys[img_id]
polys_lens_single = poly_lens[img_id].tolist()
polys_per_mask_single = polys_per_mask[img_id].tolist()
split_polys = slice_list(polys_single, polys_lens_single)
mask_polys = slice_list(split_polys, polys_per_mask_single)
mask_polys_list.append(mask_polys)
return mask_polys_list
# TODO: move this function to a more proper place
def encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code.
Args:
mask_results (list): bitmap mask results.
Returns:
list | tuple: RLE encoded mask.
"""
encoded_mask_results = []
for mask in mask_results:
encoded_mask_results.append(
mask_util.encode(
np.array(mask[:, :, np.newaxis], order='F',
dtype='uint8'))[0]) # encoded with RLE
return encoded_mask_results
def mask2bbox(masks):
"""Obtain tight bounding boxes of binary masks.
Args:
masks (Tensor): Binary mask of shape (n, h, w).
Returns:
        Tensor: Boxes with shape (n, 4) of \
positive region in binary mask.
"""
N = masks.shape[0]
bboxes = masks.new_zeros((N, 4), dtype=torch.float32)
x_any = torch.any(masks, dim=1)
y_any = torch.any(masks, dim=2)
for i in range(N):
x = torch.where(x_any[i, :])[0]
y = torch.where(y_any[i, :])[0]
if len(x) > 0 and len(y) > 0:
bboxes[i, :] = bboxes.new_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1])
return bboxes
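# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of ``mask2bbox`` and ``encode_mask_results`` on a
# tiny hand-made binary mask; the mask content is arbitrary demo data and the
# expected outputs are easy to verify by eye.
if __name__ == '__main__':
    demo_masks = torch.zeros((1, 8, 8), dtype=torch.bool)
    demo_masks[0, 2:5, 3:7] = True  # a 3x4 foreground block
    print(mask2bbox(demo_masks))  # tensor([[3., 2., 7., 5.]])
    demo_rles = encode_mask_results([m.numpy() for m in demo_masks])
    print(demo_rles[0]['size'])  # [8, 8]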
| 2,508 | 31.166667 | 75 |
py
|
ERD
|
ERD-main/mmdet/structures/mask/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .mask_target import mask_target
from .structures import (BaseInstanceMasks, BitmapMasks, PolygonMasks,
bitmap_to_polygon, polygon_to_bitmap)
from .utils import encode_mask_results, mask2bbox, split_combined_polys
__all__ = [
'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks',
'PolygonMasks', 'encode_mask_results', 'mask2bbox', 'polygon_to_bitmap',
'bitmap_to_polygon'
]
| 486 | 39.583333 | 78 |
py
|
ERD
|
ERD-main/mmdet/structures/bbox/box_type.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Callable, Optional, Tuple, Type, Union
import numpy as np
import torch
from torch import Tensor
from .base_boxes import BaseBoxes
BoxType = Union[np.ndarray, Tensor, BaseBoxes]
box_types: dict = {}
_box_type_to_name: dict = {}
box_converters: dict = {}
def _register_box(name: str, box_type: Type, force: bool = False) -> None:
"""Register a box type.
Args:
name (str): The name of box type.
box_type (type): Box mode class to be registered.
force (bool): Whether to override an existing class with the same
name. Defaults to False.
"""
assert issubclass(box_type, BaseBoxes)
name = name.lower()
if not force and (name in box_types or box_type in _box_type_to_name):
raise KeyError(f'box type {name} has been registered')
elif name in box_types:
_box_type = box_types.pop(name)
_box_type_to_name.pop(_box_type)
elif box_type in _box_type_to_name:
_name = _box_type_to_name.pop(box_type)
box_types.pop(_name)
box_types[name] = box_type
_box_type_to_name[box_type] = name
def register_box(name: str,
box_type: Type = None,
force: bool = False) -> Union[Type, Callable]:
"""Register a box type.
    A record will be added to ``box_types``, whose key is the box type name
and value is the box type itself. Simultaneously, a reverse dictionary
``_box_type_to_name`` will be updated. It can be used as a decorator or
a normal function.
Args:
name (str): The name of box type.
        box_type (type, Optional): Box type class to be registered.
Defaults to None.
force (bool): Whether to override the existing box type with the same
name. Defaults to False.
Examples:
>>> from mmdet.structures.bbox import register_box
>>> from mmdet.structures.bbox import BaseBoxes
>>> # as a decorator
>>> @register_box('hbox')
>>> class HorizontalBoxes(BaseBoxes):
>>> pass
>>> # as a normal function
>>> class RotatedBoxes(BaseBoxes):
>>> pass
>>> register_box('rbox', RotatedBoxes)
"""
if not isinstance(force, bool):
raise TypeError(f'force must be a boolean, but got {type(force)}')
# use it as a normal method: register_box(name, box_type=BoxCls)
if box_type is not None:
_register_box(name=name, box_type=box_type, force=force)
return box_type
# use it as a decorator: @register_box(name)
def _register(cls):
_register_box(name=name, box_type=cls, force=force)
return cls
return _register
def _register_box_converter(src_type: Union[str, type],
dst_type: Union[str, type],
converter: Callable,
force: bool = False) -> None:
"""Register a box converter.
Args:
src_type (str or type): source box type name or class.
dst_type (str or type): destination box type name or class.
converter (Callable): Convert function.
force (bool): Whether to override the existing box type with the same
name. Defaults to False.
"""
assert callable(converter)
src_type_name, _ = get_box_type(src_type)
dst_type_name, _ = get_box_type(dst_type)
converter_name = src_type_name + '2' + dst_type_name
if not force and converter_name in box_converters:
raise KeyError(f'The box converter from {src_type_name} to '
f'{dst_type_name} has been registered.')
box_converters[converter_name] = converter
def register_box_converter(src_type: Union[str, type],
dst_type: Union[str, type],
converter: Optional[Callable] = None,
force: bool = False) -> Callable:
"""Register a box converter.
    A record will be added to ``box_converters``, whose key is
'{src_type_name}2{dst_type_name}' and value is the convert function.
It can be used as a decorator or a normal function.
Args:
src_type (str or type): source box type name or class.
dst_type (str or type): destination box type name or class.
converter (Callable): Convert function. Defaults to None.
force (bool): Whether to override the existing box type with the same
name. Defaults to False.
Examples:
>>> from mmdet.structures.bbox import register_box_converter
>>> # as a decorator
>>> @register_box_converter('hbox', 'rbox')
>>> def converter_A(boxes):
>>> pass
>>> # as a normal function
>>> def converter_B(boxes):
>>> pass
>>> register_box_converter('rbox', 'hbox', converter_B)
"""
if not isinstance(force, bool):
raise TypeError(f'force must be a boolean, but got {type(force)}')
# use it as a normal method:
# register_box_converter(src_type, dst_type, converter=Func)
if converter is not None:
_register_box_converter(
src_type=src_type,
dst_type=dst_type,
converter=converter,
force=force)
return converter
# use it as a decorator: @register_box_converter(name)
def _register(func):
_register_box_converter(
src_type=src_type, dst_type=dst_type, converter=func, force=force)
return func
return _register
def get_box_type(box_type: Union[str, type]) -> Tuple[str, type]:
"""get both box type name and class.
Args:
box_type (str or type): Single box type name or class.
Returns:
Tuple[str, type]: A tuple of box type name and class.
"""
if isinstance(box_type, str):
type_name = box_type.lower()
assert type_name in box_types, \
f"Box type {type_name} hasn't been registered in box_types."
type_cls = box_types[type_name]
elif issubclass(box_type, BaseBoxes):
assert box_type in _box_type_to_name, \
f"Box type {box_type} hasn't been registered in box_types."
type_name = _box_type_to_name[box_type]
type_cls = box_type
else:
raise KeyError('box_type must be a str or class inheriting from '
f'BaseBoxes, but got {type(box_type)}.')
return type_name, type_cls
def convert_box_type(boxes: BoxType,
*,
src_type: Union[str, type] = None,
dst_type: Union[str, type] = None) -> BoxType:
"""Convert boxes from source type to destination type.
    If ``boxes`` is an instance of BaseBoxes, the ``src_type`` will be set
as the type of ``boxes``.
Args:
boxes (np.ndarray or Tensor or :obj:`BaseBoxes`): boxes need to
convert.
src_type (str or type, Optional): source box type. Defaults to None.
dst_type (str or type, Optional): destination box type. Defaults to
None.
Returns:
        Union[np.ndarray, Tensor, :obj:`BaseBoxes`]: Converted boxes. Its type
is consistent with the input's type.
"""
assert dst_type is not None
dst_type_name, dst_type_cls = get_box_type(dst_type)
is_box_cls = False
is_numpy = False
if isinstance(boxes, BaseBoxes):
src_type_name, _ = get_box_type(type(boxes))
is_box_cls = True
elif isinstance(boxes, (Tensor, np.ndarray)):
assert src_type is not None
src_type_name, _ = get_box_type(src_type)
if isinstance(boxes, np.ndarray):
is_numpy = True
else:
        raise TypeError('boxes must be an instance of BaseBoxes, Tensor or '
                        f'ndarray, but got {type(boxes)}.')
if src_type_name == dst_type_name:
return boxes
converter_name = src_type_name + '2' + dst_type_name
assert converter_name in box_converters, \
"Convert function hasn't been registered in box_converters."
converter = box_converters[converter_name]
if is_box_cls:
boxes = converter(boxes.tensor)
return dst_type_cls(boxes)
elif is_numpy:
boxes = converter(torch.from_numpy(boxes))
return boxes.numpy()
else:
return converter(boxes)
def autocast_box_type(dst_box_type='hbox') -> Callable:
"""A decorator which automatically casts results['gt_bboxes'] to the
destination box type.
    It is commonly used in mmdet.datasets.transforms to make the transforms
    compatible with the np.ndarray type of results['gt_bboxes'].
    The processing speed of np.ndarray and BaseBoxes data is about the same:
- np.ndarray: 0.0509 img/s
- BaseBoxes: 0.0551 img/s
Args:
dst_box_type (str): Destination box type.
"""
_, box_type_cls = get_box_type(dst_box_type)
def decorator(func: Callable) -> Callable:
def wrapper(self, results: dict, *args, **kwargs) -> dict:
if ('gt_bboxes' not in results
or isinstance(results['gt_bboxes'], BaseBoxes)):
return func(self, results)
elif isinstance(results['gt_bboxes'], np.ndarray):
results['gt_bboxes'] = box_type_cls(
results['gt_bboxes'], clone=False)
if 'mix_results' in results:
for res in results['mix_results']:
if isinstance(res['gt_bboxes'], np.ndarray):
res['gt_bboxes'] = box_type_cls(
res['gt_bboxes'], clone=False)
_results = func(self, results, *args, **kwargs)
# In some cases, the function will process gt_bboxes in-place
# Simultaneously convert inputting and outputting gt_bboxes
# back to np.ndarray
if isinstance(_results, dict) and 'gt_bboxes' in _results:
if isinstance(_results['gt_bboxes'], BaseBoxes):
_results['gt_bboxes'] = _results['gt_bboxes'].numpy()
if isinstance(results['gt_bboxes'], BaseBoxes):
results['gt_bboxes'] = results['gt_bboxes'].numpy()
return _results
else:
raise TypeError(
"auto_box_type requires results['gt_bboxes'] to "
'be BaseBoxes or np.ndarray, but got '
f"{type(results['gt_bboxes'])}")
return wrapper
return decorator
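# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the registry helpers above. Because this module
# uses a relative import, run it as ``python -m mmdet.structures.bbox.box_type``.
# The demo imports ``HorizontalBoxes`` (whose import registers the 'hbox' type)
# and the helpers from the package so that a single registry instance is used.
if __name__ == '__main__':
    from mmdet.structures.bbox import (HorizontalBoxes, convert_box_type,
                                       get_box_type)
    hbox_name, hbox_cls = get_box_type('hbox')
    print(hbox_name, hbox_cls is HorizontalBoxes)  # hbox True
    demo_boxes = np.array([[0., 0., 10., 10.]], dtype=np.float32)
    # converting to the same registered type is a no-op returning the input
    print(convert_box_type(demo_boxes, src_type='hbox', dst_type='hbox')
          is demo_boxes)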
| 10,566 | 34.579125 | 79 |
py
|
ERD
|
ERD-main/mmdet/structures/bbox/horizontal_boxes.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple, TypeVar, Union
import cv2
import numpy as np
import torch
from torch import BoolTensor, Tensor
from mmdet.structures.mask.structures import BitmapMasks, PolygonMasks
from .base_boxes import BaseBoxes
from .bbox_overlaps import bbox_overlaps
from .box_type import register_box
T = TypeVar('T')
DeviceType = Union[str, torch.device]
MaskType = Union[BitmapMasks, PolygonMasks]
@register_box(name='hbox')
class HorizontalBoxes(BaseBoxes):
"""The horizontal box class used in MMDetection by default.
The ``box_dim`` of ``HorizontalBoxes`` is 4, which means the length of
the last dimension of the data should be 4. Two modes of box data are
supported in ``HorizontalBoxes``:
- 'xyxy': Each row of data indicates (x1, y1, x2, y2), which are the
coordinates of the left-top and right-bottom points.
- 'cxcywh': Each row of data indicates (x, y, w, h), where (x, y) are the
coordinates of the box centers and (w, h) are the width and height.
    ``HorizontalBoxes`` only stores data in 'xyxy' mode. If the data is
    in 'cxcywh' mode, users need to pass ``in_mode='cxcywh'`` and the code
    will convert the 'cxcywh' data to 'xyxy' automatically.
Args:
data (Tensor or np.ndarray or Sequence): The box data with shape of
(..., 4).
dtype (torch.dtype, Optional): data type of boxes. Defaults to None.
device (str or torch.device, Optional): device of boxes.
Default to None.
clone (bool): Whether clone ``boxes`` or not. Defaults to True.
        in_mode (str, Optional): the mode of boxes. If it is 'cxcywh', the
`data` will be converted to 'xyxy' mode. Defaults to None.
"""
box_dim: int = 4
def __init__(self,
data: Union[Tensor, np.ndarray],
dtype: torch.dtype = None,
device: DeviceType = None,
clone: bool = True,
in_mode: Optional[str] = None) -> None:
super().__init__(data=data, dtype=dtype, device=device, clone=clone)
if isinstance(in_mode, str):
if in_mode not in ('xyxy', 'cxcywh'):
raise ValueError(f'Get invalid mode {in_mode}.')
if in_mode == 'cxcywh':
self.tensor = self.cxcywh_to_xyxy(self.tensor)
@staticmethod
def cxcywh_to_xyxy(boxes: Tensor) -> Tensor:
"""Convert box coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).
Args:
boxes (Tensor): cxcywh boxes tensor with shape of (..., 4).
Returns:
Tensor: xyxy boxes tensor with shape of (..., 4).
"""
ctr, wh = boxes.split((2, 2), dim=-1)
return torch.cat([(ctr - wh / 2), (ctr + wh / 2)], dim=-1)
@staticmethod
def xyxy_to_cxcywh(boxes: Tensor) -> Tensor:
"""Convert box coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).
Args:
boxes (Tensor): xyxy boxes tensor with shape of (..., 4).
Returns:
Tensor: cxcywh boxes tensor with shape of (..., 4).
"""
xy1, xy2 = boxes.split((2, 2), dim=-1)
return torch.cat([(xy2 + xy1) / 2, (xy2 - xy1)], dim=-1)
@property
def cxcywh(self) -> Tensor:
"""Return a tensor representing the cxcywh boxes."""
return self.xyxy_to_cxcywh(self.tensor)
@property
def centers(self) -> Tensor:
"""Return a tensor representing the centers of boxes."""
boxes = self.tensor
return (boxes[..., :2] + boxes[..., 2:]) / 2
@property
def areas(self) -> Tensor:
"""Return a tensor representing the areas of boxes."""
boxes = self.tensor
return (boxes[..., 2] - boxes[..., 0]) * (
boxes[..., 3] - boxes[..., 1])
@property
def widths(self) -> Tensor:
"""Return a tensor representing the widths of boxes."""
boxes = self.tensor
return boxes[..., 2] - boxes[..., 0]
@property
def heights(self) -> Tensor:
"""Return a tensor representing the heights of boxes."""
boxes = self.tensor
return boxes[..., 3] - boxes[..., 1]
def flip_(self,
img_shape: Tuple[int, int],
direction: str = 'horizontal') -> None:
"""Flip boxes horizontally or vertically in-place.
Args:
img_shape (Tuple[int, int]): A tuple of image height and width.
direction (str): Flip direction, options are "horizontal",
"vertical" and "diagonal". Defaults to "horizontal"
"""
assert direction in ['horizontal', 'vertical', 'diagonal']
flipped = self.tensor
boxes = flipped.clone()
if direction == 'horizontal':
flipped[..., 0] = img_shape[1] - boxes[..., 2]
flipped[..., 2] = img_shape[1] - boxes[..., 0]
elif direction == 'vertical':
flipped[..., 1] = img_shape[0] - boxes[..., 3]
flipped[..., 3] = img_shape[0] - boxes[..., 1]
else:
flipped[..., 0] = img_shape[1] - boxes[..., 2]
flipped[..., 1] = img_shape[0] - boxes[..., 3]
flipped[..., 2] = img_shape[1] - boxes[..., 0]
flipped[..., 3] = img_shape[0] - boxes[..., 1]
def translate_(self, distances: Tuple[float, float]) -> None:
"""Translate boxes in-place.
Args:
distances (Tuple[float, float]): translate distances. The first
is horizontal distance and the second is vertical distance.
"""
boxes = self.tensor
assert len(distances) == 2
self.tensor = boxes + boxes.new_tensor(distances).repeat(2)
def clip_(self, img_shape: Tuple[int, int]) -> None:
"""Clip boxes according to the image shape in-place.
Args:
img_shape (Tuple[int, int]): A tuple of image height and width.
"""
boxes = self.tensor
boxes[..., 0::2] = boxes[..., 0::2].clamp(0, img_shape[1])
boxes[..., 1::2] = boxes[..., 1::2].clamp(0, img_shape[0])
def rotate_(self, center: Tuple[float, float], angle: float) -> None:
"""Rotate all boxes in-place.
Args:
center (Tuple[float, float]): Rotation origin.
angle (float): Rotation angle represented in degrees. Positive
values mean clockwise rotation.
"""
boxes = self.tensor
rotation_matrix = boxes.new_tensor(
cv2.getRotationMatrix2D(center, -angle, 1))
corners = self.hbox2corner(boxes)
corners = torch.cat(
[corners, corners.new_ones(*corners.shape[:-1], 1)], dim=-1)
corners_T = torch.transpose(corners, -1, -2)
corners_T = torch.matmul(rotation_matrix, corners_T)
corners = torch.transpose(corners_T, -1, -2)
self.tensor = self.corner2hbox(corners)
def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None:
"""Geometric transformat boxes in-place.
Args:
homography_matrix (Tensor or np.ndarray]):
Shape (3, 3) for geometric transformation.
"""
boxes = self.tensor
if isinstance(homography_matrix, np.ndarray):
homography_matrix = boxes.new_tensor(homography_matrix)
corners = self.hbox2corner(boxes)
corners = torch.cat(
[corners, corners.new_ones(*corners.shape[:-1], 1)], dim=-1)
corners_T = torch.transpose(corners, -1, -2)
corners_T = torch.matmul(homography_matrix, corners_T)
corners = torch.transpose(corners_T, -1, -2)
# Convert to homogeneous coordinates by normalization
corners = corners[..., :2] / corners[..., 2:3]
self.tensor = self.corner2hbox(corners)
@staticmethod
def hbox2corner(boxes: Tensor) -> Tensor:
"""Convert box coordinates from (x1, y1, x2, y2) to corners ((x1, y1),
(x2, y1), (x1, y2), (x2, y2)).
Args:
boxes (Tensor): Horizontal box tensor with shape of (..., 4).
Returns:
Tensor: Corner tensor with shape of (..., 4, 2).
"""
x1, y1, x2, y2 = torch.split(boxes, 1, dim=-1)
corners = torch.cat([x1, y1, x2, y1, x1, y2, x2, y2], dim=-1)
return corners.reshape(*corners.shape[:-1], 4, 2)
@staticmethod
def corner2hbox(corners: Tensor) -> Tensor:
"""Convert box coordinates from corners ((x1, y1), (x2, y1), (x1, y2),
(x2, y2)) to (x1, y1, x2, y2).
Args:
corners (Tensor): Corner tensor with shape of (..., 4, 2).
Returns:
Tensor: Horizontal box tensor with shape of (..., 4).
"""
if corners.numel() == 0:
return corners.new_zeros((0, 4))
min_xy = corners.min(dim=-2)[0]
max_xy = corners.max(dim=-2)[0]
return torch.cat([min_xy, max_xy], dim=-1)
def rescale_(self, scale_factor: Tuple[float, float]) -> None:
"""Rescale boxes w.r.t. rescale_factor in-place.
Note:
Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes
            w.r.t ``scale_factor``. The difference is that ``resize_`` only
changes the width and the height of boxes, but ``rescale_`` also
rescales the box centers simultaneously.
Args:
scale_factor (Tuple[float, float]): factors for scaling boxes.
The length should be 2.
"""
boxes = self.tensor
assert len(scale_factor) == 2
scale_factor = boxes.new_tensor(scale_factor).repeat(2)
self.tensor = boxes * scale_factor
def resize_(self, scale_factor: Tuple[float, float]) -> None:
"""Resize the box width and height w.r.t scale_factor in-place.
Note:
Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes
            w.r.t ``scale_factor``. The difference is that ``resize_`` only
changes the width and the height of boxes, but ``rescale_`` also
rescales the box centers simultaneously.
Args:
scale_factor (Tuple[float, float]): factors for scaling box
shapes. The length should be 2.
"""
boxes = self.tensor
assert len(scale_factor) == 2
ctrs = (boxes[..., 2:] + boxes[..., :2]) / 2
wh = boxes[..., 2:] - boxes[..., :2]
scale_factor = boxes.new_tensor(scale_factor)
wh = wh * scale_factor
xy1 = ctrs - 0.5 * wh
xy2 = ctrs + 0.5 * wh
self.tensor = torch.cat([xy1, xy2], dim=-1)
def is_inside(self,
img_shape: Tuple[int, int],
all_inside: bool = False,
allowed_border: int = 0) -> BoolTensor:
"""Find boxes inside the image.
Args:
img_shape (Tuple[int, int]): A tuple of image height and width.
all_inside (bool): Whether the boxes are all inside the image or
part inside the image. Defaults to False.
allowed_border (int): Boxes that extend beyond the image shape
boundary by more than ``allowed_border`` are considered
"outside" Defaults to 0.
Returns:
BoolTensor: A BoolTensor indicating whether the box is inside
the image. Assuming the original boxes have shape (m, n, 4),
the output has shape (m, n).
"""
img_h, img_w = img_shape
boxes = self.tensor
if all_inside:
return (boxes[:, 0] >= -allowed_border) & \
(boxes[:, 1] >= -allowed_border) & \
(boxes[:, 2] < img_w + allowed_border) & \
(boxes[:, 3] < img_h + allowed_border)
else:
return (boxes[..., 0] < img_w + allowed_border) & \
(boxes[..., 1] < img_h + allowed_border) & \
(boxes[..., 2] > -allowed_border) & \
(boxes[..., 3] > -allowed_border)
def find_inside_points(self,
points: Tensor,
is_aligned: bool = False) -> BoolTensor:
"""Find inside box points. Boxes dimension must be 2.
Args:
points (Tensor): Points coordinates. Has shape of (m, 2).
is_aligned (bool): Whether ``points`` has been aligned with boxes
or not. If True, the length of boxes and ``points`` should be
the same. Defaults to False.
Returns:
BoolTensor: A BoolTensor indicating whether a point is inside
boxes. Assuming the boxes has shape of (n, 4), if ``is_aligned``
is False. The index has shape of (m, n). If ``is_aligned`` is
True, m should be equal to n and the index has shape of (m, ).
"""
boxes = self.tensor
assert boxes.dim() == 2, 'boxes dimension must be 2.'
if not is_aligned:
boxes = boxes[None, :, :]
points = points[:, None, :]
else:
assert boxes.size(0) == points.size(0)
x_min, y_min, x_max, y_max = boxes.unbind(dim=-1)
return (points[..., 0] >= x_min) & (points[..., 0] <= x_max) & \
(points[..., 1] >= y_min) & (points[..., 1] <= y_max)
@staticmethod
def overlaps(boxes1: BaseBoxes,
boxes2: BaseBoxes,
mode: str = 'iou',
is_aligned: bool = False,
eps: float = 1e-6) -> Tensor:
"""Calculate overlap between two set of boxes with their types
converted to ``HorizontalBoxes``.
Args:
boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim)
or empty.
boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim)
or empty.
mode (str): "iou" (intersection over union), "iof" (intersection
over foreground). Defaults to "iou".
is_aligned (bool): If True, then m and n must be equal. Defaults
to False.
eps (float): A value added to the denominator for numerical
stability. Defaults to 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
boxes1 = boxes1.convert_to('hbox')
boxes2 = boxes2.convert_to('hbox')
return bbox_overlaps(
boxes1.tensor,
boxes2.tensor,
mode=mode,
is_aligned=is_aligned,
eps=eps)
@staticmethod
def from_instance_masks(masks: MaskType) -> 'HorizontalBoxes':
"""Create horizontal boxes from instance masks.
Args:
masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks or
PolygonMasks instance with length of n.
Returns:
:obj:`HorizontalBoxes`: Converted boxes with shape of (n, 4).
"""
num_masks = len(masks)
boxes = np.zeros((num_masks, 4), dtype=np.float32)
if isinstance(masks, BitmapMasks):
x_any = masks.masks.any(axis=1)
y_any = masks.masks.any(axis=2)
for idx in range(num_masks):
x = np.where(x_any[idx, :])[0]
y = np.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
# use +1 for x_max and y_max so that the right and bottom
# boundary of instance masks are fully included by the box
boxes[idx, :] = np.array(
[x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32)
elif isinstance(masks, PolygonMasks):
for idx, poly_per_obj in enumerate(masks.masks):
# simply use a number that is big enough for comparison with
# coordinates
xy_min = np.array([masks.width * 2, masks.height * 2],
dtype=np.float32)
xy_max = np.zeros(2, dtype=np.float32)
for p in poly_per_obj:
xy = np.array(p).reshape(-1, 2).astype(np.float32)
xy_min = np.minimum(xy_min, np.min(xy, axis=0))
xy_max = np.maximum(xy_max, np.max(xy, axis=0))
boxes[idx, :2] = xy_min
boxes[idx, 2:] = xy_max
else:
raise TypeError(
'`masks` must be `BitmapMasks` or `PolygonMasks`, '
f'but got {type(masks)}.')
return HorizontalBoxes(boxes)
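# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of ``HorizontalBoxes``: a box given in 'cxcywh'
# mode is converted to the internal 'xyxy' layout, then flipped and clipped
# in-place. Run it as ``python -m mmdet.structures.bbox.horizontal_boxes``
# because this module relies on relative imports; all numbers are arbitrary
# demo values.
if __name__ == '__main__':
    demo = HorizontalBoxes(
        torch.tensor([[30., 50., 20., 10.]]), in_mode='cxcywh')
    print(demo.tensor)  # tensor([[20., 45., 40., 55.]])
    print(demo.centers, demo.areas)  # centers (30., 50.), area 200.
    demo.flip_(img_shape=(100, 100), direction='horizontal')
    demo.clip_(img_shape=(100, 100))
    print(demo.tensor)  # tensor([[60., 45., 80., 55.]])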
| 16,542 | 39.05569 | 78 |
py
|
ERD
|
ERD-main/mmdet/structures/bbox/base_boxes.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod, abstractproperty, abstractstaticmethod
from typing import List, Optional, Sequence, Tuple, Type, TypeVar, Union
import numpy as np
import torch
from torch import BoolTensor, Tensor
from mmdet.structures.mask.structures import BitmapMasks, PolygonMasks
T = TypeVar('T')
DeviceType = Union[str, torch.device]
IndexType = Union[slice, int, list, torch.LongTensor, torch.cuda.LongTensor,
torch.BoolTensor, torch.cuda.BoolTensor, np.ndarray]
MaskType = Union[BitmapMasks, PolygonMasks]
class BaseBoxes(metaclass=ABCMeta):
"""The base class for 2D box types.
The functions of ``BaseBoxes`` lie in three fields:
- Verify the boxes shape.
- Support tensor-like operations.
- Define abstract functions for 2D boxes.
In ``__init__`` , ``BaseBoxes`` verifies the validity of the data shape
w.r.t ``box_dim``. The tensor with the dimension >= 2 and the length
of the last dimension being ``box_dim`` will be regarded as valid.
    ``BaseBoxes`` will store them in the field ``tensor``. It's necessary
to override ``box_dim`` in subclass to guarantee the data shape is
correct.
There are many basic tensor-like functions implemented in ``BaseBoxes``.
    In most cases, users can operate a ``BaseBoxes`` instance like a normal
    tensor. To protect the validity of data shape, all tensor-like functions
cannot modify the last dimension of ``self.tensor``.
When creating a new box type, users need to inherit from ``BaseBoxes``
and override abstract methods and specify the ``box_dim``. Then, register
    the new box type by using the decorator ``register_box``.
Args:
data (Tensor or np.ndarray or Sequence): The box data with shape
(..., box_dim).
dtype (torch.dtype, Optional): data type of boxes. Defaults to None.
device (str or torch.device, Optional): device of boxes.
Default to None.
clone (bool): Whether clone ``boxes`` or not. Defaults to True.
"""
# Used to verify the last dimension length
# Should override it in subclass.
box_dim: int = 0
def __init__(self,
data: Union[Tensor, np.ndarray, Sequence],
dtype: Optional[torch.dtype] = None,
device: Optional[DeviceType] = None,
clone: bool = True) -> None:
if isinstance(data, (np.ndarray, Tensor, Sequence)):
data = torch.as_tensor(data)
else:
            raise TypeError('boxes should be Tensor, ndarray, or Sequence, '
                            f'but got {type(data)}')
if device is not None or dtype is not None:
data = data.to(dtype=dtype, device=device)
# Clone the data to avoid potential bugs
if clone:
data = data.clone()
# handle the empty input like []
if data.numel() == 0:
data = data.reshape((-1, self.box_dim))
assert data.dim() >= 2 and data.size(-1) == self.box_dim, \
('The boxes dimension must >= 2 and the length of the last '
f'dimension must be {self.box_dim}, but got boxes with '
f'shape {data.shape}.')
self.tensor = data
def convert_to(self, dst_type: Union[str, type]) -> 'BaseBoxes':
"""Convert self to another box type.
Args:
dst_type (str or type): destination box type.
Returns:
:obj:`BaseBoxes`: destination box type object .
"""
from .box_type import convert_box_type
return convert_box_type(self, dst_type=dst_type)
def empty_boxes(self: T,
dtype: Optional[torch.dtype] = None,
device: Optional[DeviceType] = None) -> T:
"""Create empty box.
Args:
dtype (torch.dtype, Optional): data type of boxes.
device (str or torch.device, Optional): device of boxes.
Returns:
T: empty boxes with shape of (0, box_dim).
"""
empty_box = self.tensor.new_zeros(
0, self.box_dim, dtype=dtype, device=device)
return type(self)(empty_box, clone=False)
def fake_boxes(self: T,
sizes: Tuple[int],
fill: float = 0,
dtype: Optional[torch.dtype] = None,
device: Optional[DeviceType] = None) -> T:
"""Create fake boxes with specific sizes and fill values.
Args:
sizes (Tuple[int]): The size of fake boxes. The last value must
be equal with ``self.box_dim``.
fill (float): filling value. Defaults to 0.
dtype (torch.dtype, Optional): data type of boxes.
device (str or torch.device, Optional): device of boxes.
Returns:
T: Fake boxes with shape of ``sizes``.
"""
fake_boxes = self.tensor.new_full(
sizes, fill, dtype=dtype, device=device)
return type(self)(fake_boxes, clone=False)
def __getitem__(self: T, index: IndexType) -> T:
"""Rewrite getitem to protect the last dimension shape."""
boxes = self.tensor
if isinstance(index, np.ndarray):
index = torch.as_tensor(index, device=self.device)
if isinstance(index, Tensor) and index.dtype == torch.bool:
assert index.dim() < boxes.dim()
elif isinstance(index, tuple):
assert len(index) < boxes.dim()
# `Ellipsis`(...) is commonly used in index like [None, ...].
# When `Ellipsis` is in index, it must be the last item.
if Ellipsis in index:
assert index[-1] is Ellipsis
boxes = boxes[index]
if boxes.dim() == 1:
boxes = boxes.reshape(1, -1)
return type(self)(boxes, clone=False)
def __setitem__(self: T, index: IndexType, values: Union[Tensor, T]) -> T:
"""Rewrite setitem to protect the last dimension shape."""
assert type(values) is type(self), \
'The value to be set must be the same box type as self'
values = values.tensor
if isinstance(index, np.ndarray):
index = torch.as_tensor(index, device=self.device)
if isinstance(index, Tensor) and index.dtype == torch.bool:
assert index.dim() < self.tensor.dim()
elif isinstance(index, tuple):
assert len(index) < self.tensor.dim()
# `Ellipsis`(...) is commonly used in index like [None, ...].
# When `Ellipsis` is in index, it must be the last item.
if Ellipsis in index:
assert index[-1] is Ellipsis
self.tensor[index] = values
def __len__(self) -> int:
"""Return the length of self.tensor first dimension."""
return self.tensor.size(0)
def __deepcopy__(self, memo):
"""Only clone the ``self.tensor`` when applying deepcopy."""
cls = self.__class__
other = cls.__new__(cls)
memo[id(self)] = other
other.tensor = self.tensor.clone()
return other
def __repr__(self) -> str:
"""Return a strings that describes the object."""
return self.__class__.__name__ + '(\n' + str(self.tensor) + ')'
def new_tensor(self, *args, **kwargs) -> Tensor:
"""Reload ``new_tensor`` from self.tensor."""
return self.tensor.new_tensor(*args, **kwargs)
def new_full(self, *args, **kwargs) -> Tensor:
"""Reload ``new_full`` from self.tensor."""
return self.tensor.new_full(*args, **kwargs)
def new_empty(self, *args, **kwargs) -> Tensor:
"""Reload ``new_empty`` from self.tensor."""
return self.tensor.new_empty(*args, **kwargs)
def new_ones(self, *args, **kwargs) -> Tensor:
"""Reload ``new_ones`` from self.tensor."""
return self.tensor.new_ones(*args, **kwargs)
def new_zeros(self, *args, **kwargs) -> Tensor:
"""Reload ``new_zeros`` from self.tensor."""
return self.tensor.new_zeros(*args, **kwargs)
def size(self, dim: Optional[int] = None) -> Union[int, torch.Size]:
"""Reload new_zeros from self.tensor."""
# self.tensor.size(dim) cannot work when dim=None.
return self.tensor.size() if dim is None else self.tensor.size(dim)
def dim(self) -> int:
"""Reload ``dim`` from self.tensor."""
return self.tensor.dim()
@property
def device(self) -> torch.device:
"""Reload ``device`` from self.tensor."""
return self.tensor.device
@property
def dtype(self) -> torch.dtype:
"""Reload ``dtype`` from self.tensor."""
return self.tensor.dtype
@property
def shape(self) -> torch.Size:
return self.tensor.shape
def numel(self) -> int:
"""Reload ``numel`` from self.tensor."""
return self.tensor.numel()
def numpy(self) -> np.ndarray:
"""Reload ``numpy`` from self.tensor."""
return self.tensor.numpy()
def to(self: T, *args, **kwargs) -> T:
"""Reload ``to`` from self.tensor."""
return type(self)(self.tensor.to(*args, **kwargs), clone=False)
def cpu(self: T) -> T:
"""Reload ``cpu`` from self.tensor."""
return type(self)(self.tensor.cpu(), clone=False)
def cuda(self: T, *args, **kwargs) -> T:
"""Reload ``cuda`` from self.tensor."""
return type(self)(self.tensor.cuda(*args, **kwargs), clone=False)
def clone(self: T) -> T:
"""Reload ``clone`` from self.tensor."""
return type(self)(self.tensor)
def detach(self: T) -> T:
"""Reload ``detach`` from self.tensor."""
return type(self)(self.tensor.detach(), clone=False)
def view(self: T, *shape: Tuple[int]) -> T:
"""Reload ``view`` from self.tensor."""
return type(self)(self.tensor.view(shape), clone=False)
def reshape(self: T, *shape: Tuple[int]) -> T:
"""Reload ``reshape`` from self.tensor."""
return type(self)(self.tensor.reshape(shape), clone=False)
def expand(self: T, *sizes: Tuple[int]) -> T:
"""Reload ``expand`` from self.tensor."""
return type(self)(self.tensor.expand(sizes), clone=False)
def repeat(self: T, *sizes: Tuple[int]) -> T:
"""Reload ``repeat`` from self.tensor."""
return type(self)(self.tensor.repeat(sizes), clone=False)
def transpose(self: T, dim0: int, dim1: int) -> T:
"""Reload ``transpose`` from self.tensor."""
ndim = self.tensor.dim()
assert dim0 != -1 and dim0 != ndim - 1
assert dim1 != -1 and dim1 != ndim - 1
return type(self)(self.tensor.transpose(dim0, dim1), clone=False)
def permute(self: T, *dims: Tuple[int]) -> T:
"""Reload ``permute`` from self.tensor."""
assert dims[-1] == -1 or dims[-1] == self.tensor.dim() - 1
return type(self)(self.tensor.permute(dims), clone=False)
def split(self: T,
split_size_or_sections: Union[int, Sequence[int]],
dim: int = 0) -> List[T]:
"""Reload ``split`` from self.tensor."""
assert dim != -1 and dim != self.tensor.dim() - 1
boxes_list = self.tensor.split(split_size_or_sections, dim=dim)
return [type(self)(boxes, clone=False) for boxes in boxes_list]
def chunk(self: T, chunks: int, dim: int = 0) -> List[T]:
"""Reload ``chunk`` from self.tensor."""
assert dim != -1 and dim != self.tensor.dim() - 1
boxes_list = self.tensor.chunk(chunks, dim=dim)
return [type(self)(boxes, clone=False) for boxes in boxes_list]
    def unbind(self: T, dim: int = 0) -> List[T]:
"""Reload ``unbind`` from self.tensor."""
assert dim != -1 and dim != self.tensor.dim() - 1
boxes_list = self.tensor.unbind(dim=dim)
return [type(self)(boxes, clone=False) for boxes in boxes_list]
def flatten(self: T, start_dim: int = 0, end_dim: int = -2) -> T:
"""Reload ``flatten`` from self.tensor."""
assert end_dim != -1 and end_dim != self.tensor.dim() - 1
return type(self)(self.tensor.flatten(start_dim, end_dim), clone=False)
def squeeze(self: T, dim: Optional[int] = None) -> T:
"""Reload ``squeeze`` from self.tensor."""
boxes = self.tensor.squeeze() if dim is None else \
self.tensor.squeeze(dim)
return type(self)(boxes, clone=False)
def unsqueeze(self: T, dim: int) -> T:
"""Reload ``unsqueeze`` from self.tensor."""
assert dim != -1 and dim != self.tensor.dim()
return type(self)(self.tensor.unsqueeze(dim), clone=False)
@classmethod
def cat(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T:
"""Cancatenates a box instance list into one single box instance.
Similar to ``torch.cat``.
Args:
box_list (Sequence[T]): A sequence of box instances.
dim (int): The dimension over which the box are concatenated.
Defaults to 0.
Returns:
T: Concatenated box instance.
"""
assert isinstance(box_list, Sequence)
if len(box_list) == 0:
            raise ValueError('box_list should not be an empty list.')
assert dim != -1 and dim != box_list[0].dim() - 1
assert all(isinstance(boxes, cls) for boxes in box_list)
th_box_list = [boxes.tensor for boxes in box_list]
return cls(torch.cat(th_box_list, dim=dim), clone=False)
@classmethod
def stack(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T:
"""Concatenates a sequence of tensors along a new dimension. Similar to
``torch.stack``.
Args:
box_list (Sequence[T]): A sequence of box instances.
dim (int): Dimension to insert. Defaults to 0.
Returns:
T: Concatenated box instance.
"""
assert isinstance(box_list, Sequence)
if len(box_list) == 0:
            raise ValueError('box_list should not be an empty list.')
assert dim != -1 and dim != box_list[0].dim()
assert all(isinstance(boxes, cls) for boxes in box_list)
th_box_list = [boxes.tensor for boxes in box_list]
return cls(torch.stack(th_box_list, dim=dim), clone=False)
@abstractproperty
def centers(self) -> Tensor:
"""Return a tensor representing the centers of boxes."""
pass
@abstractproperty
def areas(self) -> Tensor:
"""Return a tensor representing the areas of boxes."""
pass
@abstractproperty
def widths(self) -> Tensor:
"""Return a tensor representing the widths of boxes."""
pass
@abstractproperty
def heights(self) -> Tensor:
"""Return a tensor representing the heights of boxes."""
pass
@abstractmethod
def flip_(self,
img_shape: Tuple[int, int],
direction: str = 'horizontal') -> None:
"""Flip boxes horizontally or vertically in-place.
Args:
img_shape (Tuple[int, int]): A tuple of image height and width.
direction (str): Flip direction, options are "horizontal",
"vertical" and "diagonal". Defaults to "horizontal"
"""
pass
@abstractmethod
def translate_(self, distances: Tuple[float, float]) -> None:
"""Translate boxes in-place.
Args:
distances (Tuple[float, float]): translate distances. The first
is horizontal distance and the second is vertical distance.
"""
pass
@abstractmethod
def clip_(self, img_shape: Tuple[int, int]) -> None:
"""Clip boxes according to the image shape in-place.
Args:
img_shape (Tuple[int, int]): A tuple of image height and width.
"""
pass
@abstractmethod
def rotate_(self, center: Tuple[float, float], angle: float) -> None:
"""Rotate all boxes in-place.
Args:
center (Tuple[float, float]): Rotation origin.
angle (float): Rotation angle represented in degrees. Positive
values mean clockwise rotation.
"""
pass
@abstractmethod
def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None:
"""Geometric transformat boxes in-place.
Args:
homography_matrix (Tensor or np.ndarray]):
Shape (3, 3) for geometric transformation.
"""
pass
@abstractmethod
def rescale_(self, scale_factor: Tuple[float, float]) -> None:
"""Rescale boxes w.r.t. rescale_factor in-place.
Note:
Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes
            w.r.t ``scale_factor``. The difference is that ``resize_`` only
changes the width and the height of boxes, but ``rescale_`` also
rescales the box centers simultaneously.
Args:
scale_factor (Tuple[float, float]): factors for scaling boxes.
The length should be 2.
"""
pass
@abstractmethod
def resize_(self, scale_factor: Tuple[float, float]) -> None:
"""Resize the box width and height w.r.t scale_factor in-place.
Note:
Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes
            w.r.t ``scale_factor``. The difference is that ``resize_`` only
changes the width and the height of boxes, but ``rescale_`` also
rescales the box centers simultaneously.
Args:
scale_factor (Tuple[float, float]): factors for scaling box
shapes. The length should be 2.
"""
pass
@abstractmethod
def is_inside(self,
img_shape: Tuple[int, int],
all_inside: bool = False,
allowed_border: int = 0) -> BoolTensor:
"""Find boxes inside the image.
Args:
img_shape (Tuple[int, int]): A tuple of image height and width.
all_inside (bool): Whether the boxes are all inside the image or
part inside the image. Defaults to False.
allowed_border (int): Boxes that extend beyond the image shape
boundary by more than ``allowed_border`` are considered
"outside" Defaults to 0.
Returns:
BoolTensor: A BoolTensor indicating whether the box is inside
the image. Assuming the original boxes have shape (m, n, box_dim),
the output has shape (m, n).
"""
pass
@abstractmethod
def find_inside_points(self,
points: Tensor,
is_aligned: bool = False) -> BoolTensor:
"""Find inside box points. Boxes dimension must be 2.
Args:
points (Tensor): Points coordinates. Has shape of (m, 2).
is_aligned (bool): Whether ``points`` has been aligned with boxes
or not. If True, the length of boxes and ``points`` should be
the same. Defaults to False.
Returns:
BoolTensor: A BoolTensor indicating whether a point is inside
boxes. Assuming the boxes has shape of (n, box_dim), if
``is_aligned`` is False. The index has shape of (m, n). If
``is_aligned`` is True, m should be equal to n and the index has
shape of (m, ).
"""
pass
@abstractstaticmethod
def overlaps(boxes1: 'BaseBoxes',
boxes2: 'BaseBoxes',
mode: str = 'iou',
is_aligned: bool = False,
eps: float = 1e-6) -> Tensor:
"""Calculate overlap between two set of boxes with their types
converted to the present box type.
Args:
boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim)
or empty.
boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim)
or empty.
mode (str): "iou" (intersection over union), "iof" (intersection
over foreground). Defaults to "iou".
is_aligned (bool): If True, then m and n must be equal. Defaults
to False.
eps (float): A value added to the denominator for numerical
stability. Defaults to 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
pass
@abstractstaticmethod
def from_instance_masks(masks: MaskType) -> 'BaseBoxes':
"""Create boxes from instance masks.
Args:
masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks or
PolygonMasks instance with length of n.
Returns:
:obj:`BaseBoxes`: Converted boxes with shape of (n, box_dim).
"""
pass
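# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the tensor-like interface defined above, shown
# through the concrete ``HorizontalBoxes`` subclass. Importing the subclass
# from the package is an assumption made only for this demo; run it as
# ``python -m mmdet.structures.bbox.base_boxes``.
if __name__ == '__main__':
    from mmdet.structures.bbox import HorizontalBoxes
    a = HorizontalBoxes(torch.rand(3, 4))
    b = HorizontalBoxes(torch.rand(2, 4))
    merged = HorizontalBoxes.cat([a, b], dim=0)
    print(len(merged), merged[0].shape)  # 5 torch.Size([1, 4])
    print(merged.fake_boxes((2, 4), fill=1.).tensor)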
| 20,934 | 37.063636 | 79 |
py
|
ERD
|
ERD-main/mmdet/structures/bbox/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_boxes import BaseBoxes
from .bbox_overlaps import bbox_overlaps
from .box_type import (autocast_box_type, convert_box_type, get_box_type,
register_box, register_box_converter)
from .horizontal_boxes import HorizontalBoxes
from .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_project, bbox_rescale,
bbox_xyxy_to_cxcywh, cat_boxes, corner2bbox,
distance2bbox, empty_box_as, find_inside_bboxes,
get_box_tensor, get_box_wh, roi2bbox, scale_boxes,
stack_boxes)
__all__ = [
'bbox_overlaps', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
'find_inside_bboxes', 'bbox2corner', 'corner2bbox', 'bbox_project',
'BaseBoxes', 'convert_box_type', 'get_box_type', 'register_box',
'register_box_converter', 'HorizontalBoxes', 'autocast_box_type',
'cat_boxes', 'stack_boxes', 'scale_boxes', 'get_box_wh', 'get_box_tensor',
'empty_box_as'
]
| 1,319 | 51.8 | 78 |
py
|
ERD
|
ERD-main/mmdet/structures/bbox/bbox_overlaps.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def fp16_clamp(x, min=None, max=None):
if not x.is_cuda and x.dtype == torch.float16:
# clamp for cpu float16, tensor fp16 has no clamp implementation
return x.float().clamp(min, max).half()
return x.clamp(min, max)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
"""Calculate overlap between two set of bboxes.
    FP16 support contributed by https://github.com/open-mmlab/mmdetection/pull/4889
Note:
Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',
there are some new generated variable when calculating IOU
using bbox_overlaps function:
1) is_aligned is False
area1: M x 1
area2: N x 1
lt: M x N x 2
rb: M x N x 2
wh: M x N x 2
overlap: M x N x 1
union: M x N x 1
ious: M x N x 1
Total memory:
S = (9 x N x M + N + M) * 4 Byte,
When using FP16, we can reduce:
R = (9 x N x M + N + M) * 4 / 2 Byte
            R larger than (N + M) * 4 * 2 always holds when N and M >= 1.
Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2,
N + 1 < 3 * N, when N or M is 1.
Given M = 40 (ground truth), N = 400000 (three anchor boxes
            per grid, FPN, R-CNNs),
R = 275 MB (one times)
A special case (dense detection), M = 512 (ground truth),
R = 3516 MB = 3.43 GB
When the batch size is B, reduce:
B x R
Therefore, CUDA memory runs out frequently.
Experiments on GeForce RTX 2080Ti (11019 MiB):
| dtype | M | N | Use | Real | Ideal |
|:----:|:----:|:----:|:----:|:----:|:----:|
| FP32 | 512 | 400000 | 8020 MiB | -- | -- |
| FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |
| FP32 | 40 | 400000 | 1540 MiB | -- | -- |
| FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |
2) is_aligned is True
area1: N x 1
area2: N x 1
lt: N x 2
rb: N x 2
wh: N x 2
overlap: N x 1
union: N x 1
ious: N x 1
Total memory:
S = 11 x N * 4 Byte
When using FP16, we can reduce:
R = 11 x N * 4 / 2 Byte
            The same holds for 'giou' (which needs even more memory than 'iou').
        Time-wise, FP16 is generally faster than FP32.
        When gpu_assign_thr is not -1, the computation runs on the CPU, which
        takes more time but does not reduce memory.
        Therefore, we can halve the memory usage while keeping the speed.
If ``is_aligned`` is ``False``, then calculate the overlaps between each
bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
If ``is_aligned`` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection over
foreground) or "giou" (generalized intersection over union).
Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
# Either the boxes are empty or the length of boxes' last dimension is 4
assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
# Batch dim must be the same
# Batch dim: (B1, B2, ... Bn)
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows, ))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
bboxes1[..., 3] - bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
bboxes2[..., 3] - bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2]) # [B, rows, cols, 2]
rb = torch.min(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
# calculate gious
enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
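# Added sanity-check sketch (not part of the upstream mmdet file): it evaluates the
# memory formula quoted in the docstring above, S = (9 x N x M + N + M) * 4 Byte for
# FP32, halved for FP16, so the reduction R can be reproduced for the quoted sizes.
def _fp16_memory_saving_mib(m, n=400000):
    """Return the FP16 memory reduction R in MiB for M=m ground truths and N=n boxes."""
    fp32_bytes = (9 * n * m + n + m) * 4
    return fp32_bytes / 2 / 2 ** 20
# _fp16_memory_saving_mib(40) ~= 275 MiB and _fp16_memory_saving_mib(512) ~= 3516 MiB,
# matching the table in the docstring above.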
| 7,323 | 35.62 | 78 |
py
|
ERD
|
ERD-main/mmdet/structures/bbox/transforms.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from mmdet.structures.bbox import BaseBoxes
def find_inside_bboxes(bboxes: Tensor, img_h: int, img_w: int) -> Tensor:
"""Find bboxes as long as a part of bboxes is inside the image.
Args:
bboxes (Tensor): Shape (N, 4).
img_h (int): Image height.
img_w (int): Image width.
Returns:
Tensor: Index of the remaining bboxes.
"""
inside_inds = (bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0) \
& (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0)
return inside_inds
def bbox_flip(bboxes: Tensor,
img_shape: Tuple[int],
direction: str = 'horizontal') -> Tensor:
"""Flip bboxes horizontally or vertically.
Args:
bboxes (Tensor): Shape (..., 4*k)
img_shape (Tuple[int]): Image shape.
direction (str): Flip direction, options are "horizontal", "vertical",
"diagonal". Default: "horizontal"
Returns:
Tensor: Flipped bboxes.
"""
assert bboxes.shape[-1] % 4 == 0
assert direction in ['horizontal', 'vertical', 'diagonal']
flipped = bboxes.clone()
if direction == 'horizontal':
flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
elif direction == 'vertical':
flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
else:
flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
return flipped
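# Added illustration (not part of the upstream file): a tiny helper showing the
# coordinate arithmetic of ``bbox_flip`` for a horizontal flip in an image of shape
# (h=100, w=200): x1' = w - x2 and x2' = w - x1, while y stays unchanged.
def _example_bbox_flip():
    boxes = torch.tensor([[10., 20., 60., 80.]])
    flipped = bbox_flip(boxes, img_shape=(100, 200), direction='horizontal')
    assert torch.equal(flipped, torch.tensor([[140., 20., 190., 80.]]))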
def bbox_mapping(bboxes: Tensor,
img_shape: Tuple[int],
scale_factor: Union[float, Tuple[float]],
flip: bool,
flip_direction: str = 'horizontal') -> Tensor:
"""Map bboxes from the original image scale to testing scale."""
new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
if flip:
new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)
return new_bboxes
def bbox_mapping_back(bboxes: Tensor,
img_shape: Tuple[int],
scale_factor: Union[float, Tuple[float]],
flip: bool,
flip_direction: str = 'horizontal') -> Tensor:
"""Map bboxes from testing scale to original image scale."""
new_bboxes = bbox_flip(bboxes, img_shape,
flip_direction) if flip else bboxes
new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor)
return new_bboxes.view(bboxes.shape)
def bbox2roi(bbox_list: List[Union[Tensor, BaseBoxes]]) -> Tensor:
"""Convert a list of bboxes to roi format.
Args:
bbox_list (List[Union[Tensor, :obj:`BaseBoxes`]]): a list of bboxes
corresponding to a batch of images.
Returns:
Tensor: shape (n, box_dim + 1), where ``box_dim`` depends on the
different box types. For example, if the box type in ``bbox_list``
is HorizontalBoxes, the output shape is (n, 5). Each row of data
indicates [batch_ind, x1, y1, x2, y2].
"""
rois_list = []
for img_id, bboxes in enumerate(bbox_list):
bboxes = get_box_tensor(bboxes)
img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
rois = torch.cat([img_inds, bboxes], dim=-1)
rois_list.append(rois)
rois = torch.cat(rois_list, 0)
return rois
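# Added illustration (not part of the upstream file): ``bbox2roi`` prepends the image
# index to every box, so two images with 2 and 1 boxes yield one (3, 5) RoI tensor.
def _example_bbox2roi():
    boxes_img0 = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 20.]])
    boxes_img1 = torch.tensor([[1., 2., 3., 4.]])
    rois = bbox2roi([boxes_img0, boxes_img1])
    assert rois.shape == (3, 5)
    assert rois[:, 0].tolist() == [0., 0., 1.]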
def roi2bbox(rois: Tensor) -> List[Tensor]:
"""Convert rois to bounding box format.
Args:
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
List[Tensor]: Converted boxes of corresponding rois.
"""
bbox_list = []
img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
for img_id in img_ids:
inds = (rois[:, 0] == img_id.item())
bbox = rois[inds, 1:]
bbox_list.append(bbox)
return bbox_list
# TODO remove later
def bbox2result(bboxes: Union[Tensor, np.ndarray], labels: Union[Tensor,
np.ndarray],
num_classes: int) -> List[np.ndarray]:
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (Tensor | np.ndarray): shape (n, 5)
labels (Tensor | np.ndarray): shape (n, )
num_classes (int): class number, including background class
Returns:
List[np.ndarray]: bbox results of each class
"""
if bboxes.shape[0] == 0:
return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)]
else:
if isinstance(bboxes, torch.Tensor):
bboxes = bboxes.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
return [bboxes[labels == i, :] for i in range(num_classes)]
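# Added illustration (not part of the upstream file): three detections with labels
# {0, 2, 0} are split into per-class arrays; a class without detections gets an
# empty (0, 5) array.
def _example_bbox2result():
    dets = np.array([[0., 0., 1., 1., 0.9],
                     [2., 2., 3., 3., 0.8],
                     [4., 4., 5., 5., 0.7]], dtype=np.float32)
    labels = np.array([0, 2, 0])
    per_class = bbox2result(dets, labels, num_classes=3)
    assert [r.shape[0] for r in per_class] == [2, 0, 1]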
def distance2bbox(
points: Tensor,
distance: Tensor,
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None
) -> Tensor:
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
distance (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)
max_shape (Union[Sequence[int], Tensor, Sequence[Sequence[int]]],
optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
"""
x1 = points[..., 0] - distance[..., 0]
y1 = points[..., 1] - distance[..., 1]
x2 = points[..., 0] + distance[..., 2]
y2 = points[..., 1] + distance[..., 3]
bboxes = torch.stack([x1, y1, x2, y2], -1)
if max_shape is not None:
if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export():
# speed up
bboxes[:, 0::2].clamp_(min=0, max=max_shape[1])
bboxes[:, 1::2].clamp_(min=0, max=max_shape[0])
return bboxes
# clip bboxes with dynamic `min` and `max` for onnx
if torch.onnx.is_in_onnx_export():
# TODO: delete
from mmdet.core.export import dynamic_clip_for_onnx
x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return bboxes
if not isinstance(max_shape, torch.Tensor):
max_shape = x1.new_tensor(max_shape)
max_shape = max_shape[..., :2].type_as(x1)
if max_shape.ndim == 2:
assert bboxes.ndim == 3
assert max_shape.size(0) == bboxes.size(0)
min_xy = x1.new_tensor(0)
max_xy = torch.cat([max_shape, max_shape],
dim=-1).flip(-1).unsqueeze(-2)
bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
return bboxes
def bbox2distance(points: Tensor,
bbox: Tensor,
max_dis: Optional[float] = None,
eps: float = 0.1) -> Tensor:
"""Decode bounding box based on distances.
Args:
points (Tensor): Shape (n, 2) or (b, n, 2), [x, y].
bbox (Tensor): Shape (n, 4) or (b, n, 4), "xyxy" format
max_dis (float, optional): Upper bound of the distance.
eps (float): a small value to ensure target < max_dis, instead <=
Returns:
Tensor: Decoded distances.
"""
left = points[..., 0] - bbox[..., 0]
top = points[..., 1] - bbox[..., 1]
right = bbox[..., 2] - points[..., 0]
bottom = bbox[..., 3] - points[..., 1]
if max_dis is not None:
left = left.clamp(min=0, max=max_dis - eps)
top = top.clamp(min=0, max=max_dis - eps)
right = right.clamp(min=0, max=max_dis - eps)
bottom = bottom.clamp(min=0, max=max_dis - eps)
return torch.stack([left, top, right, bottom], -1)
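# Added illustration (not part of the upstream file): ``bbox2distance`` and
# ``distance2bbox`` are inverses of each other as long as the point lies inside the
# box and no clamping is applied.
def _example_distance_roundtrip():
    points = torch.tensor([[5., 5.]])
    bbox = torch.tensor([[2., 3., 9., 8.]])
    distances = bbox2distance(points, bbox)  # tensor([[3., 2., 4., 3.]])
    assert torch.allclose(distance2bbox(points, distances), bbox)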
def bbox_rescale(bboxes: Tensor, scale_factor: float = 1.0) -> Tensor:
"""Rescale bounding box w.r.t. scale_factor.
Args:
bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois
scale_factor (float): rescale factor
Returns:
Tensor: Rescaled bboxes.
"""
if bboxes.size(1) == 5:
bboxes_ = bboxes[:, 1:]
inds_ = bboxes[:, 0]
else:
bboxes_ = bboxes
cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5
cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5
w = bboxes_[:, 2] - bboxes_[:, 0]
h = bboxes_[:, 3] - bboxes_[:, 1]
w = w * scale_factor
h = h * scale_factor
x1 = cx - 0.5 * w
x2 = cx + 0.5 * w
y1 = cy - 0.5 * h
y2 = cy + 0.5 * h
if bboxes.size(1) == 5:
rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1)
else:
rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return rescaled_bboxes
def bbox_cxcywh_to_xyxy(bbox: Tensor) -> Tensor:
"""Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).
Args:
bbox (Tensor): Shape (n, 4) for bboxes.
Returns:
Tensor: Converted bboxes.
"""
cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)
bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]
return torch.cat(bbox_new, dim=-1)
def bbox_xyxy_to_cxcywh(bbox: Tensor) -> Tensor:
"""Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).
Args:
bbox (Tensor): Shape (n, 4) for bboxes.
Returns:
Tensor: Converted bboxes.
"""
x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)
bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)]
return torch.cat(bbox_new, dim=-1)
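# Added illustration (not part of the upstream file): the two conversions above are
# exact inverses, e.g. (10, 20, 30, 60) <-> center (20, 40) with size (20, 40).
def _example_cxcywh_roundtrip():
    xyxy = torch.tensor([[10., 20., 30., 60.]])
    cxcywh = bbox_xyxy_to_cxcywh(xyxy)  # tensor([[20., 40., 20., 40.]])
    assert torch.allclose(bbox_cxcywh_to_xyxy(cxcywh), xyxy)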
def bbox2corner(bboxes: torch.Tensor) -> torch.Tensor:
"""Convert bbox coordinates from (x1, y1, x2, y2) to corners ((x1, y1),
(x2, y1), (x1, y2), (x2, y2)).
Args:
bboxes (Tensor): Shape (n, 4) for bboxes.
Returns:
Tensor: Shape (n*4, 2) for corners.
"""
x1, y1, x2, y2 = torch.split(bboxes, 1, dim=1)
return torch.cat([x1, y1, x2, y1, x1, y2, x2, y2], dim=1).reshape(-1, 2)
def corner2bbox(corners: torch.Tensor) -> torch.Tensor:
"""Convert bbox coordinates from corners ((x1, y1), (x2, y1), (x1, y2),
(x2, y2)) to (x1, y1, x2, y2).
Args:
corners (Tensor): Shape (n*4, 2) for corners.
Returns:
Tensor: Shape (n, 4) for bboxes.
"""
corners = corners.reshape(-1, 4, 2)
min_xy = corners.min(dim=1)[0]
max_xy = corners.max(dim=1)[0]
return torch.cat([min_xy, max_xy], dim=1)
def bbox_project(
bboxes: Union[torch.Tensor, np.ndarray],
homography_matrix: Union[torch.Tensor, np.ndarray],
img_shape: Optional[Tuple[int, int]] = None
) -> Union[torch.Tensor, np.ndarray]:
"""Geometric transformation for bbox.
Args:
bboxes (Union[torch.Tensor, np.ndarray]): Shape (n, 4) for bboxes.
homography_matrix (Union[torch.Tensor, np.ndarray]):
Shape (3, 3) for geometric transformation.
img_shape (Tuple[int, int], optional): Image shape. Defaults to None.
Returns:
Union[torch.Tensor, np.ndarray]: Converted bboxes.
"""
bboxes_type = type(bboxes)
if bboxes_type is np.ndarray:
bboxes = torch.from_numpy(bboxes)
if isinstance(homography_matrix, np.ndarray):
homography_matrix = torch.from_numpy(homography_matrix)
corners = bbox2corner(bboxes)
corners = torch.cat(
[corners, corners.new_ones(corners.shape[0], 1)], dim=1)
corners = torch.matmul(homography_matrix, corners.t()).t()
# Convert to homogeneous coordinates by normalization
corners = corners[:, :2] / corners[:, 2:3]
bboxes = corner2bbox(corners)
if img_shape is not None:
bboxes[:, 0::2] = bboxes[:, 0::2].clamp(0, img_shape[1])
bboxes[:, 1::2] = bboxes[:, 1::2].clamp(0, img_shape[0])
if bboxes_type is np.ndarray:
bboxes = bboxes.numpy()
return bboxes
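# Added illustration (not part of the upstream file): a pure-translation homography
# shifts the box by (tx, ty) = (5, -3); passing ``img_shape`` would additionally clip
# the result to the image.
def _example_bbox_project():
    boxes = np.array([[10., 10., 20., 20.]], dtype=np.float32)
    translation = np.array([[1., 0., 5.],
                            [0., 1., -3.],
                            [0., 0., 1.]], dtype=np.float32)
    projected = bbox_project(boxes, translation)
    assert np.allclose(projected, [[15., 7., 25., 17.]])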
def cat_boxes(data_list: List[Union[Tensor, BaseBoxes]],
dim: int = 0) -> Union[Tensor, BaseBoxes]:
"""Concatenate boxes with type of tensor or box type.
Args:
data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors
or box types need to be concatenated.
dim (int): The dimension over which the box are concatenated.
Defaults to 0.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Concatenated results.
"""
if data_list and isinstance(data_list[0], BaseBoxes):
return data_list[0].cat(data_list, dim=dim)
else:
return torch.cat(data_list, dim=dim)
def stack_boxes(data_list: List[Union[Tensor, BaseBoxes]],
dim: int = 0) -> Union[Tensor, BaseBoxes]:
"""Stack boxes with type of tensor or box type.
Args:
data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors
or box types need to be stacked.
dim (int): The dimension over which the box are stacked.
Defaults to 0.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Stacked results.
"""
if data_list and isinstance(data_list[0], BaseBoxes):
return data_list[0].stack(data_list, dim=dim)
else:
return torch.stack(data_list, dim=dim)
def scale_boxes(boxes: Union[Tensor, BaseBoxes],
scale_factor: Tuple[float, float]) -> Union[Tensor, BaseBoxes]:
"""Scale boxes with type of tensor or box type.
Args:
boxes (Tensor or :obj:`BaseBoxes`): boxes need to be scaled. Its type
can be a tensor or a box type.
scale_factor (Tuple[float, float]): factors for scaling boxes.
The length should be 2.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Scaled boxes.
"""
if isinstance(boxes, BaseBoxes):
boxes.rescale_(scale_factor)
return boxes
else:
# Tensor boxes will be treated as horizontal boxes
repeat_num = int(boxes.size(-1) / 2)
scale_factor = boxes.new_tensor(scale_factor).repeat((1, repeat_num))
return boxes * scale_factor
def get_box_wh(boxes: Union[Tensor, BaseBoxes]) -> Tuple[Tensor, Tensor]:
"""Get the width and height of boxes with type of tensor or box type.
Args:
boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor
or box type.
Returns:
Tuple[Tensor, Tensor]: the width and height of boxes.
"""
if isinstance(boxes, BaseBoxes):
w = boxes.widths
h = boxes.heights
else:
# Tensor boxes will be treated as horizontal boxes by default
w = boxes[:, 2] - boxes[:, 0]
h = boxes[:, 3] - boxes[:, 1]
return w, h
def get_box_tensor(boxes: Union[Tensor, BaseBoxes]) -> Tensor:
"""Get tensor data from box type boxes.
Args:
boxes (Tensor or BaseBoxes): boxes with type of tensor or box type.
If its type is a tensor, the boxes will be directly returned.
If its type is a box type, the `boxes.tensor` will be returned.
Returns:
Tensor: boxes tensor.
"""
if isinstance(boxes, BaseBoxes):
boxes = boxes.tensor
return boxes
def empty_box_as(boxes: Union[Tensor, BaseBoxes]) -> Union[Tensor, BaseBoxes]:
"""Generate empty box according to input ``boxes` type and device.
Args:
boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor
or box type.
Returns:
Union[Tensor, BaseBoxes]: Generated empty box.
"""
if isinstance(boxes, BaseBoxes):
return boxes.empty_boxes()
else:
# Tensor boxes will be treated as horizontal boxes by default
return boxes.new_zeros(0, 4)
| 16,133 | 33.474359 | 79 |
py
|
fl-analysis
|
fl-analysis-master/documentation/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.join(os.path.abspath('.'), '../'))
autodoc_member_order = 'groupwise'
# -- Project information -----------------------------------------------------
project = 'Adversarial FL'
copyright = '2021, Hidde Lycklama à Nijeholt'
author = 'Hidde Lycklama à Nijeholt'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx_autodoc_typehints'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'press'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 1,992 | 35.236364 | 79 |
py
|
fl-analysis
|
fl-analysis-master/src/main.py
|
import tensorflow as tf
import numpy as np
from src.client_attacks import Attack
from src.config_cli import get_config
from src.federated_averaging import FederatedAveraging
from src.tf_model import Model
from src.config.definitions import Config
import logging
logger = logging.getLogger(__name__)
def load_model():
if config.environment.load_model is not None:
model = tf.keras.models.load_model(config.environment.load_model) # Load with weights
else:
model = Model.create_model(
config.client.model_name, config.server.intrinsic_dimension,
config.client.model_weight_regularization, config.client.disable_bn)
# save_model(model)
return model
def save_model(model):
weights = np.concatenate([x.flatten() for x in model.get_weights()])
np.savetxt("resnet18_intrinsic_40k.txt", weights)
def main():
import torch
if torch.cuda.is_available():
torch.cuda.current_device()
limit_gpu_mem()
# from src.torch_compat.anticipate_lenet import LeNet
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# m1 = LeNet(10)
# m2 = m1.to(device) # try
models = [load_model()]
if config.client.malicious is not None:
config.client.malicious.attack_type = Attack.UNTARGETED.value \
if config.client.malicious.objective['name'] == "UntargetedAttack" else Attack.BACKDOOR.value
server_model = FederatedAveraging(config, models, args.config_filepath)
server_model.init()
server_model.fit()
return
# if args.hyperparameter_tuning.lower() == "true":
# tune_hyper(args, config)
# elif len(args.permute_dataset) > 0:
# # Permute, load single attack
# if not Model.model_supported(args.model_name, args.dataset):
# raise Exception(
# f'Model {args.model_name} does not support {args.dataset}! '
# f'Check method Model.model_supported for the valid combinations.')
#
# attack = load_attacks()[0]
# amount_eval = 3
# amount_select = 80
# from itertools import combinations
# import random
# total_combinations = list(combinations(set(args.permute_dataset), amount_eval))
# indices = sorted(random.sample(range(len(total_combinations)), amount_select))
# logger.info(f"Running {len(total_combinations)} combinations!")
# for i, p in enumerate([total_combinations[i] for i in indices]):
# train = list(set(args.permute_dataset) - set(p))
# eval = list(p)
# attack['backdoor']['train'] = train
# attack['backdoor']['test'] = eval
# config['attack'] = attack
# config['attack_type'] = Attack.UNTARGETED.value \
# if attack['objective']['name'] == "UntargetedAttack" else Attack.BACKDOOR.value
#
# logger.info(f"Running backdoor with samples {eval} {train}")
#
# models = [load_model() for i in range(args.workers)]
#
# server_model = FederatedAveraging(config, models, f"attack-{i}")
# server_model.init()
# server_model.fit()
# else:
# if not Model.model_supported(args.model_name, args.dataset):
# raise Exception(
# f'Model {args.model_name} does not support {args.dataset}! '
# f'Check method Model.model_supported for the valid combinations.')
#
# for i, attack in enumerate(load_attacks()):
# config['attack'] = attack
# config['attack_type'] = Attack.UNTARGETED.value \
# if attack['objective']['name'] == "UntargetedAttack" else Attack.BACKDOOR.value
#
# logger.info(f"Running attack objective {config['attack_type']}"
# f" (evasion: {attack['evasion']['name'] if 'evasion' in attack else None})")
#
# models = [load_model() for i in range(args.workers)]
#
# server_model = FederatedAveraging(config, models, f"attack-{i}")
# server_model.init()
# server_model.fit()
def limit_gpu_mem():
limit_mb = config.environment.limit_tf_gpu_mem_mb
if limit_mb is None:
return
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only allocate 1GB of memory on the first GPU
try:
tf.config.experimental.set_virtual_device_configuration(
gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=limit_mb)]) # Notice here
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
if __name__ == '__main__':
config: Config
config, args = get_config()
np.random.seed(config.environment.seed)
tf.random.set_seed(config.environment.seed)
main()
| 5,117 | 38.068702 | 118 |
py
|
fl-analysis
|
fl-analysis-master/src/learning_rate_decay.py
|
import numpy as np
import tensorflow as tf
class StepDecay:
def __init__(self, initial_lr, total_steps, bound1=0.33, bound2=0.66):
# store the base learning rate, the total number of steps, and the
# relative boundaries at which the rate is decayed
self.initial_lr = initial_lr
self.total_steps = total_steps
self.step = 0
self.mul = 1
self.bound1 = bound1
self.bound2 = bound2
def __call__(self):
lr = self.initial_lr * self.mul
if self.step > self.bound2 * self.total_steps:
lr = lr * 0.01
elif self.step > self.bound1 * self.total_steps:
lr = lr * 0.1
# print(f"LR: {lr}")
return float(lr)
def apply_step(self):
self.step += 1
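# Added usage sketch (not part of the upstream file): with total_steps=100 and the
# default boundaries, the schedule keeps the initial rate for roughly the first third
# of the steps, then decays it by 10x and finally by 100x.
def _example_step_decay():
    schedule = StepDecay(initial_lr=0.1, total_steps=100)
    lrs = []
    for _ in range(100):
        lrs.append(schedule())
        schedule.apply_step()
    assert abs(lrs[0] - 0.1) < 1e-9
    assert abs(lrs[50] - 0.01) < 1e-9   # past bound1 * total_steps
    assert abs(lrs[80] - 0.001) < 1e-9  # past bound2 * total_steps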
| 746 | 24.758621 | 74 |
py
|
fl-analysis
|
fl-analysis-master/src/test_util.py
|
from unittest import TestCase
import numpy as np
import util
class UtilityTest(TestCase):
def test_aggregate_weights_masked_simple(self):
current_weights = [VariableWrapper([0.]) for _ in range(8)]
w1 = np.array([[1], [1], [1], [1], [2], [2], [2], [2]])
w2 = np.array([[3], [3], [3], [3], [4], [4], [4], [4]])
w3 = np.array([[5], [5], [5], [5], [6], [6], [6], [6]])
w1m = np.array([[True], [True], [True], [False], [True], [True], [True], [False]])
w2m = np.array([[False], [True], [True], [True], [False], [True], [True], [True]])
w3m = np.array([[False], [True], [False], [True], [False], [True], [False], [True]])
res = util.aggregate_weights_masked(current_weights, 1.0, 3.0, 1.0, [w1m, w2m, w3m], [w1, w2, w3])
np.testing.assert_array_almost_equal(np.array([
[1./3.], [3], [1.3333], [2.666667], [2./3.], [4], [2], [3.33333]
]), res, decimal=3)
def test_aggregate_weights_masked_emptynan(self):
current_weights = [VariableWrapper([5.])] + [VariableWrapper([0.]) for _ in range(7)]
w1 = np.array([[1], [1], [1], [1], [2], [2], [2], [2]])
w2 = np.array([[3], [3], [3], [3], [4], [4], [4], [4]])
w3 = np.array([[5], [5], [5], [5], [6], [6], [6], [6]])
w1m = np.array([[False], [True], [True], [False], [True], [True], [True], [False]])
w2m = np.array([[False], [True], [True], [True], [False], [True], [True], [True]])
w3m = np.array([[False], [True], [False], [True], [False], [True], [False], [True]])
res = util.aggregate_weights_masked(current_weights, 1.0, 3.0, 1.0, [w1m, w2m, w3m], [w1, w2, w3])
np.testing.assert_array_almost_equal(np.array([
[5], [3], [1.33333], [2.6666667], [2./3.], [4.], [2.], [3.33333]
]), res, decimal=3)
def test_aggregate_weights_masked_multidim(self):
current_weights = [VariableWrapper([2., 2.]), VariableWrapper([2., 2.])]
w1 = np.array([[1, 1], [1, 1]])
w2 = np.array([[3, 3], [3, 3]])
w3 = np.array([[5, 5], [5, 5]])
w1m = np.array([[False, False], [True, True]])
w2m = np.array([[True, False], [True, False]])
w3m = np.array([[False, True], [False, True]])
res = util.aggregate_weights_masked(current_weights, 1.0, 3.0, 1.0, [w1m, w2m, w3m], [w1, w2, w3])
np.testing.assert_array_almost_equal(np.array([
[2.33333, 3], [2, 2.66667]
]), res, decimal=3)
class VariableWrapper:
"""Wrapper to wrap variable in to provide compatibility with Tensorflow"""
def __init__(self, var):
self.var = var
def __add__(self, o):
return self.var + o
def numpy(self):
return self.var
| 2,747 | 41.9375 | 106 |
py
|
fl-analysis
|
fl-analysis-master/src/federated_averaging.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import time
import logging
from copy import deepcopy
from pathlib import Path
from threading import Thread
from typing import List, Any
import numpy as np
import tensorflow as tf
# from src.torch_compat.data_holder import DataHolder
tf.get_logger().setLevel('DEBUG')
import src.config as config
from src.attack_dataset_config import AttackDatasetConfig
from src.aggregation import aggregators
from src.data import data_loader
from src.client_attacks import Attack
from src.client import Client
from src.util import log_data, create_dropout_mask, aggregate_weights_masked, flatten
from src.data.tf_data import Dataset, ImageGeneratorDataset, GeneratorDataset, PixelPatternDataset
from src.tf_model import Model
from src.data.tf_data_global import IIDGlobalDataset, NonIIDGlobalDataset, DirichletDistributionDivider
class FederatedAveraging:
"""Implementation of federated averaging algorithm."""
client_objs: List[Client]
federated_dropout: config.definitions.FederatedDropout
def __init__(self, config, models, config_path):
"""
:type config: config_cli.Config
"""
self.config = config
self.num_clients = config.environment.num_clients
self.num_selected_clients = config.environment.num_selected_clients
self.num_malicious_clients = config.environment.num_malicious_clients
self.attack_frequency = config.environment.attack_frequency
self.attack_type = Attack(config.client.malicious.attack_type) \
if config.client.malicious is not None else None
self.num_rounds = config.server.num_rounds
self.batch_size = config.client.benign_training.batch_size
self.federated_dropout = config.server.federated_dropout
self.attack_dataset = AttackDatasetConfig(**self.config.client.malicious.backdoor) \
if self.config.client.malicious is not None and self.config.client.malicious.backdoor is not None else None
self.print_every = config.environment.print_every
self.model_name = config.client.model_name
self.experiment_name = config.environment.experiment_name
self.model = models[0] # use first for me
self.client_models = models
self.global_weights = self.model.get_weights()
if config.environment.use_config_dir:
self.experiment_dir = os.path.dirname(config_path)
self.experiment_root_dir = os.path.dirname(self.experiment_dir)
else:
self.experiment_root_dir = os.path.join(os.getcwd(), 'experiments')
self.experiment_dir = os.path.join(self.experiment_root_dir, self.experiment_name)
self.client_updates_dir = os.path.join(self.experiment_dir, 'updates')
self.global_model_dir = os.path.join(self.experiment_dir, 'models')
self.norms_dir = os.path.join(self.experiment_dir, 'norms')
# self.clients_data = []
self.malicious_clients = np.zeros(self.num_clients, dtype=bool)
if self.num_malicious_clients > 0:
if config.environment.malicious_client_indices is not None:
malicious_indices = config.environment.malicious_client_indices
else:
malicious_indices = np.random.choice(self.num_clients, self.num_malicious_clients, replace=False)
assert len(malicious_indices) == self.num_malicious_clients, \
"Malicious indices must equal total number of malicious clients!"
self.malicious_clients[malicious_indices] = True
self.global_dataset = self.build_dataset()
# self.malicious_clients[np.random.choice(self.num_clients, self.num_malicious_clients, replace=False)] = True
self.client_objs = []
self.client_model = None
self.client_config = {}
self.writer = None
self.keep_history = config.environment.save_history
self.parameters_history = [] if self.keep_history else None
self.previous_round_weights = None # Holder
self.test_accuracy = tf.keras.metrics.Mean(name='test_accuracy')
self.test_loss = tf.keras.metrics.Mean(name='test_loss')
self.aggregator = aggregators.build_aggregator(config)
def _init_log_directories(self):
"""Initializes directories in which log files are stored"""
if not os.path.isdir(self.experiment_root_dir):
os.mkdir(self.experiment_root_dir)
if not os.path.isdir(self.experiment_dir):
os.mkdir(self.experiment_dir)
if not os.path.isdir(self.client_updates_dir):
os.mkdir(self.client_updates_dir)
if not os.path.isdir(self.global_model_dir):
os.mkdir(self.global_model_dir)
if (self.config.environment.save_norms or self.config.environment.save_weight_outside_bound) and not os.path.isdir(self.norms_dir):
os.mkdir(self.norms_dir)
# remove everything for this directory, if we do not have set directory
if not self.config.environment.use_config_dir:
for filename in Path(self.experiment_dir).glob('**/*'):
if not os.path.isdir(str(filename)):
os.remove(str(filename))
# with open(os.path.join(self.experiment_dir, 'config.json'), 'w') as fp:
# self.config.to_yaml()
from src.custom_summary_writer import CustomSummaryWriter
self.writer = CustomSummaryWriter(self.experiment_dir)
def init(self):
""" Loads data, creates clients and client configuration."""
self._init_log_directories()
# self.client_config = {
# 'attack': self.config['attack'],
# 'attack_type': self.attack_type,
# 'batch_size': self.config['batch_size'],
# 'untargeted_after_training': self.config['untargeted_after_training'],
# 'targeted_deterministic_attack_objective': self.config['targeted_deterministic_attack_objective'],
# 'targeted_attack_objective': self.config['targeted_attack_objective'],
# 'targeted_attack_benign_first': self.config['targeted_attack_benign_first'],
# 'scale_attack': self.config['scale_attack'],
# 'scale_attack_weight': self.config['scale_attack_weight'],
# 'aggregator': self.config['aggregator'],
# 'trimmed_mean_beta': self.config['trimmed_mean_beta'],
# 'num_epochs': self.config['num_epochs'],
# 'optimizer': self.config['optimizer'],
# 'learning_rate': self.config['learning_rate'],
# 'lr_decay': self.config['lr_decay'],
# 'decay_steps': self.config['decay_steps'],
# 'decay_rate': self.config['decay_rate'],
# 'decay_boundaries': self.config['decay_boundaries'],
# 'decay_values': self.config['decay_values'],
# 'mal_learning_rate': self.config['mal_learning_rate'],
# 'mal_decay_steps': self.config['mal_decay_steps'],
# 'mal_decay_rate': self.config['mal_decay_rate'],
# 'poison_samples': self.config['poison_samples'],
# 'mal_num_batch': self.config['mal_num_batch'],
# 'mal_step_learning_rate': self.config['mal_step_learning_rate'],
# 'mal_num_epochs': self.config['mal_num_epochs'],
# 'mal_num_epochs_max': self.config['mal_num_epochs_max'],
# 'mal_target_loss': self.config['mal_target_loss'],
# 'model_name': self.config['model_name'],
# 'clip': self.config['clip'],
# 'clip_probability': self.config['clip_probability'],
# 'clip_l2': self.config['clip_l2'],
# 'clip_layers': self.config['clip_layers'],
# 'backdoor_stealth': self.config['backdoor_stealth'],
# 'estimate_other_updates': self.config['estimate_other_updates'],
# 'attack_after': self.config['attack_after'],
# 'attack_stop_after': self.config['attack_stop_after'],
# 'contamination_model': self.config['contamination_model'],
# 'contamination_rate': self.config['contamination_rate'],
# 'gaussian_noise': self.config['gaussian_noise'],
# 'pgd': self.config['pgd'],
# 'pgd_constraint': self.config['pgd_constraint'],
# 'pgd_clip_frequency': self.config['pgd_clip_frequency'],
# 'pgd_adaptive': self.config['pgd_adaptive'],
# 'weight_regularization_alpha': self.config['weight_regularization_alpha'],
# 'quantization': self.config['quantization'],
# 'q_bits': self.config['q_bits'],
# 'q_frac': self.config['q_frac'],
# 'optimized_training': self.config['optimized_training']
# }
self.client_config = self.config.client
self.build_clients(self.malicious_clients)
def build_clients(self, mal_clients):
if self.attack_type == Attack.BACKDOOR:
for bid in range(self.num_clients):
x, y = self.global_dataset.get_dataset_for_client(bid)
if mal_clients[bid] and self.config.environment.attacker_full_dataset:
x, y = self.global_dataset.get_full_dataset(x.shape[0] * 20)
if mal_clients[bid]:
ds = self.get_local_dataset(self.attack_dataset.augment_data, self.attack_dataset, x, y, batch_size=self.batch_size)
else:
ds = self.get_local_dataset(self.config.dataset.augment_data, self.attack_dataset, x, y, batch_size=self.batch_size)
# dataset = self.global_dataset.get_dataset_for_client(bid)
# ds = GeneratorDataset(dataset, self.batch_size)
if mal_clients[bid]:
if self.attack_dataset.type != "pixel_pattern":
print(f"Replacing value {self.attack_dataset.type}")
# This is very ugly, but we do not want to assign pixel pattern as it uses
# training data of the client...
ds.x_aux, ds.y_aux, ds.mal_aux_labels = self.global_dataset.x_aux_train, \
self.global_dataset.y_aux_train, \
self.global_dataset.mal_aux_labels_train
ds.x_aux_test, ds.mal_aux_labels_test = self.global_dataset.x_aux_test, \
self.global_dataset.mal_aux_labels_test
self.client_objs.append(Client(bid, self.client_config, ds, mal_clients[bid]))
else:
for bid in range(self.num_clients):
x, y = self.global_dataset.get_dataset_for_client(bid)
if mal_clients[bid] and self.config.environment.attacker_full_dataset:
x, y = self.global_dataset.get_full_dataset(x.shape[0] * 20)
ds = self.get_local_dataset(self.config.dataset.augment_data, self.attack_dataset, x, y, batch_size=self.batch_size)
self.client_objs.append(Client(bid, self.client_config, ds, mal_clients[bid]))
@staticmethod
def get_local_dataset(augment_data, attack_config, x, y, batch_size):
if attack_config is not None and attack_config.type == 'pixel_pattern':
return PixelPatternDataset(x, y, attack_config.target_label, batch_size=batch_size)
if augment_data:
return ImageGeneratorDataset(x, y, batch_size=batch_size)
else:
return Dataset(x, y, batch_size=batch_size)
def build_dataset(self):
return data_loader.load_global_dataset(self.config, self.malicious_clients, self.attack_dataset)
@staticmethod
def compute_updates(prev_weights, new_weights):
"""Compute difference between two model weights.
Args:
prev_weights (list): Parameters from previous iteration.
new_weights (list): New weights.
Returns:
list: List of gradients.
"""
return [new_weights[i] - prev_weights[i] for i in range(len(prev_weights))]
def save_client_updates(self, client_id, malicious, round, new_weights, prev_global_model_weights):
"""Saves client updates into `self.client_updates_dir` directory."""
if type(malicious) is not str:
malicious = 'm' if malicious else 'b'
delta_weights = self.compute_updates(prev_global_model_weights, new_weights)
file_name = '%i_%s_%i' % (client_id, malicious, round)
outfile = os.path.join(self.client_updates_dir, file_name)
np.save(outfile, delta_weights)
def _create_weights_list(self, selected_clients):
"""Creates dictionary (client weights for each selected client).
Additionally, it sets dropout masks if federated_dropout is < 1.0.
Args:
selected_clients (np.ndarray): Randomly selected clients without replacement.
Returns:
dict: Mappings of client id -> model parameters.
"""
if self.federated_dropout is None:
return None, {i: self.model.get_weights() for i in selected_clients}
# create dropout mask for each client
if self.federated_dropout.nonoverlap:
client_dropout_mask = create_dropout_mask(self.model, self.federated_dropout.rate,
self.federated_dropout.all_parameters,
n_clients=len(selected_clients))
else:
client_dropout_mask = []
for _ in selected_clients:
client_dropout_mask.append(create_dropout_mask(self.model, self.federated_dropout.rate,
self.federated_dropout.all_parameters)[0])
# apply dropout mask
weights_list = {}
for i, dropout_mask in enumerate(client_dropout_mask):
self.client_objs[selected_clients[i]].set_dropout_mask(dropout_mask)
model_weights = deepcopy(self.model.get_weights())
if not self.federated_dropout.randommask:
for l in range(len(model_weights)):
model_weights[l] = model_weights[l]*dropout_mask[l]
weights_list[selected_clients[i]] = model_weights
return client_dropout_mask, weights_list
def fit(self):
"""Trains the global model."""
# central_optimizer = Model.create_optimizer(self.config['optimizer'], self.config['learning_rate'],
# self.config['decay_steps'], self.config['decay_rate']) # REMOVE THIS
accuracies, rounds, adv_success_list = [], [], []
# loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
# from_logits=False) # Our model has a softmax layer!
# self.model.compile(
# loss=loss_object,
# optimizer=tf.keras.optimizers.Adam(0.001),
# metrics=['accuracy']
# )
# from src.torch_compat.anticipate import convert_model, evaluate_model
# torch_model = convert_model(self.model)
# evaluate_model(torch_model, self.global_dataset,
# self.config.client.benign_training.batch_size, self.config.server.num_test_batches)
logging.info("Starting training...")
test_accuracy, adv_success, test_loss = self.evaluate()
print('round=', 0, '\ttest_accuracy=', test_accuracy,
'\tadv_success=', adv_success, '\ttest_loss=', test_loss, flush=True)
import os
import psutil
for round in range(1, self.num_rounds + 1):
process = psutil.Process(os.getpid())
logging.debug("Memory info: " + str(process.memory_info().rss)) # in bytes
start_time = time.time()
if self.attack_frequency is None:
selected_clients = np.random.choice(self.num_clients, self.num_selected_clients, replace=False)
else:
indexes = np.array([[i, self.client_objs[i].malicious] for i in range(len(self.client_objs))])
np.random.shuffle(indexes)
assert len(indexes[indexes[:, 1] == True]) > 0, "There are 0 malicious attackers."
if round % (1 / self.attack_frequency) == 0:
num_malicious_selected = self.config.environment.num_selected_malicious_clients or self.num_malicious_clients
honest = indexes[indexes[:, 1] == False][:self.num_selected_clients - num_malicious_selected, 0]
malicious = indexes[indexes[:, 1] == True][0:num_malicious_selected][:, 0]
selected_clients = np.concatenate([malicious, honest])
else:
honest = indexes[indexes[:, 1] == False][:self.num_selected_clients, 0]
selected_clients = honest
assert len(selected_clients) == self.num_selected_clients, "There must be enough non-malicious clients to select."
client_dropout_masks, weights_list = self._create_weights_list(selected_clients)
# If attacker has full knowledge of a round
intermediate_benign_client_weights = [] if self.config.environment.attacker_full_knowledge else None
#################
# TRAINING LOOP #
#################
for i in (c for c in selected_clients if not self.client_objs[c].malicious): # Benign
# logging.debug(f"Client {i}: Train")
self.client_objs[i].last_global_weights_server = self.previous_round_weights
self.client_objs[i].set_weights(weights_list[i])
# logging.debug(f"Client {i}: Set weights")
self.client_objs[i].set_model(self.model)
# logging.debug(f"Client {i}: Set model")
self.client_objs[i].train(round)
# logging.debug(f"Client {i}: Train")
self.client_objs[i].set_model(None)
if self.config.environment.attacker_full_knowledge:
intermediate_benign_client_weights.append(
FederatedAveraging.compute_updates(self.client_objs[i].weights, weights_list[i])
)
single_malicious_update = None
for i in (c for c in selected_clients if self.client_objs[c].malicious): # Malicious
if self.config.client.malicious.multi_attacker_scale_divide and single_malicious_update is not None:
self.client_objs[i].set_weights(single_malicious_update)
else:
self.client_objs[i].last_global_weights_server = self.previous_round_weights
self.client_objs[i].set_weights(weights_list[i])
self.client_objs[i].set_model(self.model)
self.client_objs[i].set_benign_updates_this_round(intermediate_benign_client_weights)
self.client_objs[i].train(round)
self.client_objs[i].set_model(None)
if self.config.client.malicious.multi_attacker_scale_divide:
print("Setting multi attacker")
single_malicious_update = self.client_objs[i].weights
if self.config.environment.save_updates:
for i in selected_clients:
self.save_client_updates(self.client_objs[i].id, self.client_objs[i].malicious, round,
self.client_objs[i].weights, weights_list[i])
num_adversaries = np.count_nonzero([self.malicious_clients[i] for i in selected_clients])
selected_clients_list = [self.client_objs[i] for i in selected_clients]
if client_dropout_masks is not None:
# Federated Dropout
weights = aggregate_weights_masked(self.global_weights, self.config.server.global_learning_rate, self.num_clients, self.federated_dropout.rate, client_dropout_masks, [client.weights for client in selected_clients_list])
# weights = self.aggregate_weights([client.weights for client in selected_clients_list])
elif self.config.environment.ignore_malicious_update:
# Ignore malicious updates
temp_weights = [client.weights for client in selected_clients_list if not client.malicious]
weights = self.aggregator.aggregate(self.global_weights, temp_weights)
else:
# weights = selected_clients_list[0].weights # just take one, malicious
temp_weights = [client.weights for client in selected_clients_list]
if self.config.client.clip is not None and self.config.client.clip.type == "median_l2": # Apply dynamic norm bound
temp_weights = self.apply_dynamic_clipping(temp_weights)
weights = self.aggregator.aggregate(self.global_weights, temp_weights)
if self.config.server.gaussian_noise > 0.0:
logging.debug(f"Adding noise to aggregated model {self.config.server.gaussian_noise}")
total_norm = tf.norm(tf.concat([tf.reshape(weights[i], [-1])
for i in range(len(weights))], axis=0))
print(f"Global weight norm: {total_norm}")
for i, layer in enumerate(weights):
noise = self.noise_with_layer(layer)
any_noise_nan = np.isnan(noise).any()
any_layer_nan = np.isnan(layer).any()
import sys
if any_noise_nan:
print("Noise is NaN!")
np.set_printoptions(threshold=sys.maxsize)
print(noise)
if any_layer_nan:
print(f"Layer {i} is NaN1")
np.set_printoptions(threshold=sys.maxsize)
print(self.global_weights[i])
# print(temp_weights[i])
print(layer)
exit(1)
sum = layer + noise
if np.isnan(sum).any():
print("Sum is NaN!")
np.set_printoptions(threshold=sys.maxsize)
print(sum)
weights[i] = sum
# weights = [layer + self.noise_with_layer(layer) for layer in weights]
if self.keep_history:
self.parameters_history.append(deepcopy(weights))
if round % self.print_every == 0:
self.previous_round_weights = self.model.get_weights()
self.model.set_weights(weights)
if Model.model_supports_weight_analysis(self.model_name):
self.writer.analyze_weights(self.model, self.global_weights, selected_clients_list, round, self.parameters_history,
self.config.environment.save_norms, self.config.environment.save_weight_distributions,
self.config.environment.save_weight_outside_bound)
self.global_weights = weights
test_accuracy, adv_success, test_loss = self.evaluate()
duration = time.time() - start_time
self.writer.add_test_metric(test_accuracy, adv_success, round)
self.writer.add_honest_train_loss(selected_clients_list, round)
self.writer.add_adversary_count(num_adversaries, round)
accuracies.append(test_accuracy)
adv_success_list.append(adv_success)
rounds.append(round)
print('round=', round, '\ttest_accuracy=', test_accuracy, '\tadv_success=', adv_success,
'\ttest_loss=', test_loss, '\tduration=', duration, flush=True)
else:
self.model.set_weights(weights)
self.global_weights = weights
if round in self.config.environment.save_model_at:
self.save_model(round)
for client in self.client_objs:
client.weights = None # Release
log_data(self.experiment_dir, rounds, accuracies, adv_success_list)
self.log_hparams(rounds, accuracies, adv_success_list)
def noise_with_layer(self, w):
sigma = self.config.server.gaussian_noise
gauss = np.random.normal(0, sigma, w.shape)
gauss = gauss.reshape(w.shape).astype(w.dtype)
return gauss
@staticmethod
def average_weights(client_weight_list):
"""Procedure for averaging client weights"""
new_weights = deepcopy(client_weight_list[0])
# return new_weights
for client in range(1, len(client_weight_list)):
for layer in range(len(client_weight_list[client])):
new_weights[layer] = new_weights[layer] + client_weight_list[client][layer]
for layer in range(len(new_weights)):
new_weights[layer] = new_weights[layer] / len(client_weight_list)
return new_weights
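# Worked micro-example (added comment, not in the upstream file): with two clients
# whose single-layer updates are [1., 3.] and [3., 5.], average_weights sums each
# layer element-wise and divides by the number of clients, returning [2., 4.].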
@tf.function
def optimized_evaluate(self, batch_x, batch_y):
prediction_tensor = self.model(batch_x, training=False)
loss = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False)(y_true=batch_y, y_pred=prediction_tensor)
# tf.print(loss)
prediction = prediction_tensor
y_ = tf.cast(tf.argmax(prediction, axis=1), tf.uint8)
test_accuracy_batch = tf.equal(y_, batch_y)
self.test_accuracy(tf.reduce_mean(tf.cast(test_accuracy_batch, tf.float32)))
self.test_loss(loss)
if self.config.environment.print_batch_text:
self.print_batch_text(batch_x, prediction)
def print_batch_text(self, batch_x, prediction):
select = 0
ALL_LETTERS = tf.constant(list("\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"), dtype=tf.string)
# print(ALL_LETTERS)
# tf.print(ALL_LETTERS)
selected_letters = tf.strings.join(
tf.unstack(
tf.gather(ALL_LETTERS, indices=batch_x[select])))
y_ = tf.cast(tf.argmax(prediction, axis=1), tf.int32)
y_letter = tf.gather(ALL_LETTERS, y_[select])
tf.print(selected_letters, y_letter)
def evaluate(self):
"""Evaluates model performances; accuracy on test set and adversarial success.
Returns:
tuple of two floats: test accuracy, adversarial success
"""
# return 0, 0
# Batched because of memory issues
# test_accuracies = []
# predictions = []
# loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
self.test_accuracy.reset_states()
self.test_loss.reset_states()
for batch_x, batch_y in self.global_dataset.get_test_batch(
self.config.client.benign_training.batch_size, self.config.server.num_test_batches):
self.optimized_evaluate(batch_x, batch_y)
test_accuracy = self.test_accuracy.result().numpy()
test_loss = self.test_loss.result().numpy()
# print(test_accuracy.nump)
# self.test_get_correct_indices()
# calculating adv success
if self.num_malicious_clients == 0:
adv_success = 0
elif self.attack_type == Attack.UNTARGETED or self.attack_type == Attack.DEVIATE_MAX_NORM:
adv_success = 1 - test_accuracy
elif self.attack_type == Attack.BACKDOOR:
all_adv_success = []
batches = 0
attack_config: AttackDatasetConfig = self.attack_dataset
amount_images = max(attack_config.augment_times, self.global_dataset.x_aux_test.shape[0])
batch_size = min(self.global_dataset.x_aux_test.shape[0], self.config.client.benign_training.batch_size)
total_batches = int(amount_images / batch_size) # handle case ?
for batch_x, batch_y in self.global_dataset.get_aux_generator(self.config.client.benign_training.batch_size,
attack_config.augment_times,
attack_config.augment_data,
attack_config.type,
attack_config.max_test_batches):
preds = self.model(batch_x, training=False).numpy().argmax(axis=1)
pred_inds = preds == batch_y
if self.config.environment.print_backdoor_eval:
logging.info(f"Backdoor predictions: {preds}")
# This may break on large test sets
# adv_success = np.mean(pred_inds)
all_adv_success.append(pred_inds)
batches += 1
if batches > total_batches:
break # manually
adv_success = np.mean(np.concatenate(all_adv_success))
else:
raise Exception('Type not supported')
return test_accuracy, adv_success, test_loss
def test_get_correct_indices(self):
"""Debug helper"""
from src.backdoor.edge_case_attack import EuropeanSevenEdgeCase
# (batch_x, batch_y), (_, _) = EuropeanSevenEdgeCase().load()
(_, _), (batch_x, batch_y) = EuropeanSevenEdgeCase().load()
as_7s = np.repeat(7, batch_y.shape)
preds = self.model(batch_x, training=False).numpy().argmax(axis=1)
pred_inds = preds == as_7s
print(np.where(preds == as_7s))
# print(f"Correct: {self.global_dataset.y_aux_test[pred_inds]} -> {preds[pred_inds]}")
def save_model(self, round):
path = os.path.join(self.global_model_dir, f'model_{round}.h5')
print(f"Saving model at {path}")
self.model.save(path)
def write_hparams(self, hparams, metrics):
self.writer.write_hparams(hparams, metrics)
def log_hparams(self, rounds, accuracies, adv_successes):
if self.config.hyperparameters is None:
return
# for now only log last round's values
METRIC_ACCURACY = 'evaluation/test_accuracy'
METRIC_ADV_SUCCESS = 'evaluation/adv_success'
hparams_dict = flatten(self.config.hyperparameters.args)
metrics = {
METRIC_ACCURACY: accuracies[-1],
METRIC_ADV_SUCCESS: adv_successes[-1]
}
self.writer.write_hparams(hparams_dict, metrics)
def apply_dynamic_clipping(self, weights):
if self.config.client.clip.type == "median_l2":
client_delta_weights = [[client_weights[i] - self.global_weights[i] for i in range(len(client_weights))] \
for client_weights in weights]
l2_norms_per_client = [tf.norm(tf.concat([tf.reshape(delta_weights[i], [-1]) \
for i in range(len(delta_weights))], axis=0)) \
for delta_weights in client_delta_weights] # for norm calculation
median = np.median(l2_norms_per_client)
median_factor = tf.constant(self.config.client.clip.value)
bound = median * median_factor
print(f"Effective bound: {bound}")
multipliers_per_client = [min((bound / norm).numpy(), 1.0) for norm in l2_norms_per_client]
# delta_multiplied = [delta_weights[i] * multiply if i in clip_layers else delta_weights[i] for i in
# range(len(delta_weights))]
delta_multiplied = [[client_weights[i] * multiplier for i in range(len(client_weights))] \
for client_weights, multiplier in zip(client_delta_weights, multipliers_per_client)]
# Add back to global model to fit in other calculations
return [[self.global_weights[i] + client_weights[i] for i in range(len(client_weights))] \
for client_weights in delta_multiplied]
else:
return weights
| 32,412 | 47.595202 | 235 |
py
|
fl-analysis
|
fl-analysis-master/src/prob_clip.py
|
import unittest
import numpy as np
import time
def clip(updates, num_bits, num_frac, random=True):
"""Clips an update as if we had fixed precision `num_bits` with `num_frac` fractional bits"""
assert num_bits >= num_frac
# shifted = np.array(update, dtype=np.float) * pow(2, num_frac)
# print(shifted)
# for s in shifted:
# min = np.binary_repr(int(s), width=num_bits)
# max = np.binary_repr(int(round(s)), width=num_bits)
# print(min, max)
# start = time.time()
num_base = num_bits - num_frac
# for b in range(int(pow(2, num_base - 1)), 0, -1):
# fra = 1.0 / pow(2, num_frac)
# for f in np.arange(1, 0, -fra):
# val = -b - f
# bins.append(val)
# for b in range(0, int(pow(2, num_base - 1))):
# fra = 1.0 / pow(2, num_frac)
# for f in np.arange(0, 1, fra):
# val = b + f
# bins.append(val)
fra = 1.0 / pow(2, num_frac)
lim = int(pow(2, num_base - 1))
print(lim, fra)
bins = np.arange(-lim + fra, lim, fra)
clipped_all = []
for update in updates:
update = np.clip(update, bins[0], bins[-1] - 0.00001) # Clip to ensure value is within range
shape = None
if len(update.shape) > 1:
shape = update.shape
update = update.flatten()
digits = np.digitize(update, bins)
digits = np.clip(digits, 0, len(bins) - 1)
# clipped = [clip_binary(x, bins[maxIndice - 1], bins[maxIndice], random) for maxIndice, x in zip(digits, update)]
r = np.random.rand(len(digits)) if random else 0.5
min, max = bins[digits - 1], bins[digits]
assert np.all(min <= update) and np.all(update <= max)
translated = (update - min) / (max - min)
clipped = np.where(translated < r, min, max)
if shape is not None:
clipped = np.array(clipped, dtype=np.float32).reshape(shape)
clipped_all.append(clipped)
# end = time.time()
# print(f"Done in {end-start} seconds!")
return clipped_all
def clip_binary(x, min, max, random):
assert min <= x <= max, f"{min} <= {x} <= {max}"
if x == min:
return min
elif x == max:
return max
norm = max - min
val = (x - min) / norm
if random:
r = np.random.rand(1)
if val < r:
return min
else:
return max
else:
if val >= 0.5:
return max
else:
return min
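# Added illustration (not part of the upstream file): with num_bits=3 and num_frac=1
# the representable grid is {-1.5, -1.0, ..., 1.5} in steps of 0.5; clip() snaps every
# value onto that grid, deterministically to the nearer neighbour when random=False.
def _example_clip_deterministic():
    values = [np.array([0.3, 1.26, -0.74])]
    clipped = clip(values, num_bits=3, num_frac=1, random=False)[0]
    assert np.allclose(clipped, [0.5, 1.5, -0.5])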
class TestClip(unittest.TestCase):
def test_it_clips(self):
update = [0.5, 0.001223, 0.12, 8.4]
num_bits = 8
num_frac = 4
clip([update], num_bits, num_frac)  # clip() expects a list of updates
def test_it_clips_large(self):
D = 32000
update = np.random.random(D) * 15
num_bits = 8
num_frac = 4
clip([update], num_bits, num_frac)  # clip() expects a list of updates
def test_it_clips_large_average(self):
D = 32000
update = np.random.random(D) * 15
num_bits = 8
num_frac = 4
clipped = clip([update], num_bits, num_frac)[0]  # clip() expects a list of updates
avg_c, avg_o = np.average(clipped), np.average(update)
print(avg_c, avg_o)
def test_it_clips_edges(self):
update = [513.0, -513.0, 0.001223, 0.12, 8.4]
num_bits = 8
num_frac = 7
clipped = clip([update], num_bits, num_frac, random=False)
print(clipped)
| 3,384 | 29.495495 | 122 |
py
|
fl-analysis
|
fl-analysis-master/src/client_attacks.py
|
from enum import Enum
class Attack(Enum):
# Targeted attack strategies
BACKDOOR = "backdoor"
# Byzantine strategies
UNTARGETED = 'untargeted'
DEVIATE_MAX_NORM = 'deviate_max_norm'
@staticmethod
def is_targeted(type) -> bool:
"""
:type type: Attack
"""
if type == Attack.BACKDOOR:
return True
else:
return False
| 408 | 17.590909 | 41 |
py
|
fl-analysis
|
fl-analysis-master/src/loss.py
|
import tensorflow as tf
import numpy as np
# Define custom loss
def regularized_loss(local_weights, global_weights, alpha):
# Create a loss function that adds an L2 penalty on the deviation of the local weights from the global weights to the cross-entropy loss
def loss(y_true, y_pred):
cross_entropy_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)(y_true, y_pred)
weight_norm = 0
layer_i = 0
for local_weight_layer in local_weights:
w = local_weight_layer.weights
if len(w) > 1:
global_layer_tensor_w = tf.convert_to_tensor(global_weights[layer_i])
# global_layer_tensor_b = tf.convert_to_tensor(global_weights[layer_i + 1])
delta_weight = w[0] - global_layer_tensor_w
weight_norm += tf.nn.l2_loss(delta_weight)
layer_i += len(w)
# print(f"ent {cross_entropy_loss}, w {weight_norm}")
return alpha * cross_entropy_loss + ((1 - alpha) * tf.math.maximum(0, weight_norm))
# Return a function
return loss
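# Added usage sketch (assumptions: ``model`` is any tf.keras model and
# ``global_weights`` is the server-side list of global parameters, e.g. the result of
# ``model.get_weights()``; neither name is defined in this file). It shows how the
# closure returned above plugs into Keras as a regular loss.
def _example_compile_with_regularized_loss(model, global_weights):
    model.compile(
        optimizer=tf.keras.optimizers.SGD(0.1),
        loss=regularized_loss(model.layers, global_weights, alpha=0.7),
        metrics=['accuracy'])
    return model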
| 1,069 | 37.214286 | 110 |
py
|
fl-analysis
|
fl-analysis-master/src/custom_summary_writer.py
|
import numpy as np
from tensorboardX import SummaryWriter
from os.path import join
from numpy.linalg import norm
from tensorflow.python.keras.layers.convolutional import Conv2D, ZeroPadding2D, DepthwiseConv2D
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.keras.layers.embeddings import Embedding
from tensorflow.python.keras.layers.recurrent_v2 import LSTM
from scipy.linalg import eigh
from src.federated_averaging import FederatedAveraging
from src.util import power_iteration
import pandas as pd
class CustomSummaryWriter:
def __init__(self, experiment_dir):
self.events_dir = join(experiment_dir, 'events')
self.norms_dir = join(experiment_dir, 'norms')
self.writer = SummaryWriter(self.events_dir)
self.params_outside_norm_df = pd.DataFrame(columns=['round', 'clients'])
def add_test_metric(self, test_accuracy, adv_success, step):
"""Adds performance metrics to tensorboard log files.
Args:
test_accuracy (float): Accuracy on the unseen set.
adv_success (float): Adversarial success.
step (int): Global step (round).
"""
self.writer.add_scalar(f'evaluation/test_accuracy', test_accuracy, step)
self.writer.add_scalar(f'evaluation/adv_success', adv_success, step)
self.writer.flush()
def add_honest_train_loss(self, selected_clients_list, step):
train_loss = []
train_accuracy = []
for client in selected_clients_list:
if not client.malicious:
train_loss.append(client.train_loss.result())
train_accuracy.append(client.train_accuracy.result())
total_train_loss = np.mean(train_loss)
total_train_accuracy = np.mean(train_accuracy)
self.writer.add_scalar(f'evaluation/honest_train_loss', total_train_loss, step)
self.writer.add_scalar(f'evaluation/mean_train_accuracy', total_train_accuracy, step)
def add_adversary_count(self, num_adversaries_active, step):
self.writer.add_scalar(f'adversary/count', num_adversaries_active, step)
def analyze_weights(self, model, prev_weights, selected_clients, step, parameters_history=None, save_norms=False,
save_histograms=False, save_weight_outside_bound=None):
"""Analyzes model updates.
Args:
model (tf model): Current global model.
selected_clients (list): List of Client objects.
step (int): Global step (round).
parameters_history (list): (Optional) List of weights from previous rounds.
"""
# prev_weights = model.get_weights()
benign_updates, mal_updates = [], []
for client in selected_clients:
if client.malicious:
mal_updates.append(FederatedAveraging.compute_updates(prev_weights, client.weights))
else:
benign_updates.append(FederatedAveraging.compute_updates(prev_weights, client.weights))
benign_update = FederatedAveraging.average_weights(benign_updates) if benign_updates != [] else [None]*len(prev_weights)
mal_update = FederatedAveraging.average_weights(mal_updates) if mal_updates != [] else [None]*len(prev_weights)
layer_names = self.flatten([self.get_layer_name(layer) for layer in model.layers])
layer_names = [i for i in layer_names if i is not None]
# layer_names = ['Conv2D' if type(layer) == Conv2D else 'Dense'
# for layer in model.layers if type(layer) in [Conv2D, Dense]]
if len(layer_names) == 0:
layer_names = ['Theta']
printable_weights_index = [i for i in range(len(prev_weights)) if len(prev_weights[i].shape) > 1] # skip biases
for i, layer in enumerate(printable_weights_index):
for update, label in zip([mal_update[layer], benign_update[layer]], ['mal', 'benign']):
if update is None:
continue
suffix = f'norm_{label}_client_updates/l{layer}_{layer_names[i]}'
# l2, l1 norm
self.writer.add_scalar(f'l2_{suffix}', norm(update, axis=-1).mean(), step)
self.writer.add_scalar(f'l1_{suffix}', norm(update, ord=1, axis=-1).mean(), step)
                # principal eigenvalue
if parameters_history is not None and len(parameters_history) > 1:
layer_history = [parameters[layer] for parameters in parameters_history]
princ_eig = self._principle_eigen_value(layer_history, layer_names[i])
self.writer.add_scalar(f'princ_eig_{suffix}', princ_eig, step)
if save_histograms and len(mal_updates) > 0:
mal_merged = np.concatenate([update[layer] for update in mal_updates])
self.writer.add_histogram(f'histogram_mal/l{layer}_{layer_names[i]}', mal_merged.reshape(-1), step)
if save_histograms and len(benign_updates) > 0:
ben_merged = np.concatenate([update[layer] for update in benign_updates])
self.writer.add_histogram(f'histogram_ben/l{layer}_{layer_names[i]}', ben_merged.reshape(-1), step)
ben_for_layer = [update[layer] for update in benign_updates]
means = []
stds = []
for x in ben_for_layer:
n, bins = np.histogram(x)
mids = 0.5 * (bins[1:] + bins[:-1])
probs = n / np.sum(n)
mean = np.sum(probs * mids)
sd = np.sqrt(np.sum(probs * (mids - mean) ** 2))
means.append(mean)
stds.append(sd)
self.writer.add_histogram(f'histogram_ben_mean/l{layer}_{layer_names[i]}', np.array(means), step)
self.writer.add_histogram(f'histogram_ben_std/l{layer}_{layer_names[i]}', np.array(stds), step)
benign_norms_l2 = [norm(np.concatenate([np.reshape(b, [-1]) for b in bs], axis=0), axis=-1) for bs in benign_updates]
benign_norms_l1 = [norm(np.concatenate([np.reshape(b, [-1]) for b in bs], axis=0), axis=-1, ord=1) for bs in
benign_updates]
self.writer.add_scalar(f'l2_total/benign', np.mean(benign_norms_l2), step)
self.writer.add_scalar(f'l1_total/benign', np.mean(benign_norms_l1), step)
if len(mal_updates) > 0:
mal_norms_l2 = [norm(np.concatenate([np.reshape(b, [-1]) for b in bs], axis=0), axis=-1) for bs in mal_updates]
mal_norms_l1 = [norm(np.concatenate([np.reshape(b, [-1]) for b in bs], axis=0), axis=-1, ord=1) for bs in mal_updates]
self.writer.add_scalar(f'l2_total/mal', np.mean(mal_norms_l2), step)
self.writer.add_scalar(f'l1_total/mal', np.mean(mal_norms_l1), step)
if save_norms:
self.save_norms_log(step, np.array([benign_norms_l2, benign_norms_l1, mal_norms_l2, mal_norms_l1]))
elif save_norms:
self.save_norms_log(step, np.array([benign_norms_l2, benign_norms_l1]))
if save_weight_outside_bound is not None:
# compute weights outside l_inf bound
l_inf_norm = save_weight_outside_bound
benign_updates_flat = [np.concatenate([np.reshape(l, [-1]) for l in u], axis=0) for u in benign_updates]
malicious_updates_flat = [np.concatenate([np.reshape(l, [-1]) for l in u], axis=0) for u in mal_updates]
counts_per_update_benign = [np.sum(u > l_inf_norm) + np.sum(u < -l_inf_norm) for u in benign_updates_flat]
counts_per_update_malicious = [np.sum(u > l_inf_norm) + np.sum(u < -l_inf_norm) for u in malicious_updates_flat]
self.save_weight_outside_norm(l_inf_norm, step, counts_per_update_benign + counts_per_update_malicious)
self.writer.flush()
def save_norms_log(self, step, array):
np.save(join(self.norms_dir, f"round_{step}"), array)
def save_weight_outside_norm(self, linf_norm, step, array):
"""
@param array: should contain the counts of the norms outside the bound
@return:
"""
        new_row = pd.DataFrame([{"round": step, "clients": array}])
        self.params_outside_norm_df = pd.concat([self.params_outside_norm_df, new_row], ignore_index=True)
self.params_outside_norm_df.to_csv(join(self.norms_dir, f'params_outside_bound_{linf_norm}.csv'))
def write_hparams(self, hparams, metrics):
self.writer.add_hparams(hparams, metrics)
@staticmethod
def _principle_eigen_value(layer_weights, layer_type=None):
"""Computes principle eigenvalue.
Args:
layer_weights (list): List of np.ndarray parameters.
layer_type (str): List of layer names.
Returns:
float64: Principle eigenvalue.
"""
layer_weights = np.stack([layer_params.reshape(-1) for layer_params in layer_weights], axis=0) # NxM
cov_matrix = np.cov(layer_weights.T) # MxM
# _w, _v = eigh(cov_matrix) # princ w[-1]
w, v = power_iteration(cov_matrix)
return w # largest eigenvalue
def get_layer_name(self, layer):
layers = {
Conv2D: 'Conv2D',
Dense: 'Dense',
ZeroPadding2D: 'ZeroPadding2D',
DepthwiseConv2D: 'DepthwiseConv2D',
Embedding: 'Embedding',
LSTM: ['LSTM', 'LSTM'], # two sets of weights
}
if type(layer) in layers.keys():
return layers[type(layer)]
return None
def flatten(self, list_to_flatten):
output = []
for l in list_to_flatten:
if isinstance(l, list):
for m in l:
output.append(m)
else:
output.append(l)
return output
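# Illustrative usage sketch (not from the original repo): logging a few rounds of evaluation
# metrics to tensorboard event files. The experiment directory path and values are assumptions.
if __name__ == "__main__":
    demo_writer = CustomSummaryWriter("experiments/demo_run")
    for rnd, (test_acc, adv_acc) in enumerate([(0.82, 0.12), (0.86, 0.09), (0.89, 0.05)]):
        demo_writer.add_test_metric(test_acc, adv_acc, rnd)
        demo_writer.add_adversary_count(num_adversaries_active=1, step=rnd)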
| 9,803 | 47.534653 | 130 |
py
|
fl-analysis
|
fl-analysis-master/src/attack_dataset_config.py
|
class AttackDatasetConfig(object):
def __init__(self, type, train=[], test=[],
target_label=None,
remove_from_benign_dataset=False,
augment_times=0,
augment_data=False,
tasks=None,
source_label=None,
aux_samples=None,
edge_case_type=None,
edge_case_p=1.0,
pixel_pattern_type=None,
max_test_batches=None,
trigger_position=0):
self.type = type
self.train = train
self.test = test
self.source_label = source_label
self.target_label = target_label
self.remove_from_benign_dataset = remove_from_benign_dataset
self.augment_times = augment_times
self.augment_data = augment_data
self.tasks = tasks
self.aux_samples = aux_samples
self.edge_case_type = edge_case_type
self.edge_case_p = edge_case_p
self.pixel_pattern_type = pixel_pattern_type
self.max_test_batches = max_test_batches
self.trigger_position = trigger_position
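# Illustrative construction sketch; the concrete field values below are assumptions and do not
# correspond to any configuration shipped with the repository.
if __name__ == "__main__":
    demo_cfg = AttackDatasetConfig(
        type="backdoor",
        train=[10, 11, 12],            # indices of samples to poison (hypothetical)
        test=[42, 43],
        source_label=5,
        target_label=7,
        augment_times=20,
        remove_from_benign_dataset=True,
    )
    print(vars(demo_cfg))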
| 1,148 | 34.90625 | 68 |
py
|
fl-analysis
|
fl-analysis-master/src/tf_model.py
|
import math
import tensorflow as tf
from src.model.modelc import build_modelc
from src.model.lenet import build_lenet5
from src.model.resnet import resnet_v2, resnet_v1
from src.model.stacked_lstm import build_stacked_lstm
from src.model.test_model import build_test_model
from src.subspace.builder.model_builders import build_model_mnist_fc, \
build_cnn_model_mnist_bhagoji, build_cnn_model_mnist_dev_conv, build_cnn_model_mnistcnn_conv, build_LeNet_cifar, \
build_cnn_model_cifar_allcnn, build_model_cifar_LeNet_fastfood
from src.subspace.builder.resnet import build_LeNet_resnet, build_resnet_fastfood
from tensorflow.keras.regularizers import l2
from src.model.mobilenet import mobilenetv2_cifar10
class Model:
@staticmethod
def create_model(model_name, intrinsic_dimension=None, regularization_rate=None, disable_bn=False):
"""Creates NN architecture based on a given model name
Args:
model_name (str): name of a model
"""
if model_name == 'mnist_cnn':
do_fact = 0.3
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same', activation='relu',
input_shape=(28, 28, 1), dtype=float),
tf.keras.layers.MaxPooling2D(pool_size=2),
# tf.keras.layers.Dropout(0.3),
tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=2),
# tf.keras.layers.Dropout(0.3),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(256, activation='relu'),
# tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation='softmax')
])
elif model_name == 'dev':
regularizer = l2(regularization_rate) if regularization_rate is not None else None
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(8, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1), kernel_regularizer=regularizer, bias_regularizer=regularizer),
tf.keras.layers.Conv2D(4, (3, 3), activation='relu', kernel_regularizer=regularizer, bias_regularizer=regularizer),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(32, activation='relu', kernel_regularizer=regularizer, bias_regularizer=regularizer),
tf.keras.layers.Dense(10, activation='softmax'),
])
elif model_name == 'bhagoji':
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(64, kernel_size=(5, 5), padding='valid', activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(64, (5, 5), activation='relu'),
# tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
# tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation='softmax')
])
elif model_name == 'lenet5_cifar':
model = build_lenet5(input_shape=(32, 32, 3), l2_reg=regularization_rate)
elif model_name == 'lenet5_mnist':
model = build_lenet5(input_shape=(28, 28, 1), l2_reg=regularization_rate)
model.summary()
elif model_name == 'allcnn':
model = build_modelc(l2_reg=regularization_rate)
model.summary()
elif model_name == 'allcnn_intrinsic':
model = build_cnn_model_cifar_allcnn(vsize=intrinsic_dimension, weight_decay=regularization_rate)
elif model_name == 'resnet18':
model = resnet_v1(input_shape=(32, 32, 3), depth=20)
model.summary()
elif model_name == 'resnet32':
model = resnet_v1(input_shape=(32, 32, 3), depth=32)
elif model_name == 'resnet44':
model = resnet_v1(input_shape=(32, 32, 3), depth=44)
elif model_name == 'resnet56':
model = resnet_v1(input_shape=(32, 32, 3), depth=56)
model.summary()
elif model_name == 'resnet110':
model = resnet_v1(input_shape=(32, 32, 3), depth=110)
elif model_name == 'resnet18_v2':
model = resnet_v2(input_shape=(32, 32, 3), depth=20)
elif model_name == 'resnet56_v2':
model = resnet_v2(input_shape=(32, 32, 3), depth=56)
model.summary()
print("HI")
elif model_name == 'mobilenet':
model = mobilenetv2_cifar10()
model.summary()
elif model_name == 'dev_fc_intrinsic':
model, _ = build_model_mnist_fc(vsize=intrinsic_dimension, width=100)
elif model_name == 'bhagoji_intrinsic':
model = build_cnn_model_mnist_bhagoji(vsize=intrinsic_dimension, proj_type='sparse')
elif model_name == 'dev_intrinsic':
# model = build_model_cifar_LeNet_fastfood(vsize=intrinsic_dimension)
model = build_cnn_model_mnist_dev_conv(vsize=intrinsic_dimension, proj_type='sparse', weight_decay=regularization_rate)
Model.normalize(model)
elif model_name == 'mnistcnn_intrinsic':
model = build_cnn_model_mnistcnn_conv(vsize=intrinsic_dimension, proj_type='sparse')
elif model_name =='lenet5_intrinsic':
# model = build_lenet_cifar_old(intrinsic_dimension)
model = build_LeNet_cifar(vsize=intrinsic_dimension, proj_type='sparse', weight_decay=0.001)
Model.normalize(model)
elif model_name =='resnet18_intrinsic':
# model = build_lenet_cifar_old(intrinsic_dimension)
model = build_LeNet_resnet(20, vsize=intrinsic_dimension, proj_type='sparse', weight_decay=0.001,
disable_bn=disable_bn)
# model = build_resnet_fastfood(20, vsize=intrinsic_dimension, proj_type='sparse', weight_decay=0.001)
Model.normalize(model)
model.summary()
elif model_name == 'stacked_lstm':
model = build_stacked_lstm()
model.summary()
return model
elif model_name == 'test_model':
model = build_test_model()
model.summary()
else:
raise Exception('model `%s` not supported' % model_name)
return model
@staticmethod
def normalize(model, proj_type='sparse'):
basis_matrices = []
normalizers = []
for layer in model.layers:
try:
basis_matrices.extend(layer.offset_creator.basis_matrices)
except AttributeError:
continue
try:
normalizers.extend(layer.offset_creator.basis_matrix_normalizers)
except AttributeError:
continue
if proj_type == 'sparse':
# Norm of overall basis matrix rows (num elements in each sum == total parameters in model)
# bm_row_norms = tf.sqrt(tf.add_n([tf.sparse_reduce_sum(tf.square(bm), 1) for bm in basis_matrices]))
# # Assign `normalizer` Variable to these row norms to achieve normalization of the basis matrix
# # in the TF computational graph
# rescale_basis_matrices = [tf.assign(var, tf.reshape(bm_row_norms, var.shape)) for var in normalizers]
# _ = sess.run(rescale_basis_matrices)
bm_row_norms = tf.sqrt(tf.add_n([tf.sparse.reduce_sum(tf.square(bm), 1) for bm in basis_matrices]))
for var in normalizers:
var.assign(tf.reshape(bm_row_norms, var.shape))
        elif proj_type == 'dense':
            bm_sums = [tf.reduce_sum(tf.square(bm), 1) for bm in basis_matrices]
            divisor = tf.expand_dims(tf.sqrt(tf.add_n(bm_sums)), 1)
            # TF2 eager equivalent of the original TF1 `tf.assign` + `sess.run` normalization
            for var in basis_matrices:
                var.assign(var / divisor)
@staticmethod
def model_supported(model_name, dataset_name):
supported_types = {
"mnist": ["mnist_cnn", "dev", "bhagoji", "dev_fc_intrinsic", "dev_intrinsic", "mnistcnn_intrinsic", "bhagoji_intrinsic", "lenet5_mnist"],
"fmnist": ["mnist_cnn", "dev", "bhagoji", "dev_fc_intrinsic", "dev_intrinsic", "mnistcnn_intrinsic", "bhagoji_intrinsic", "lenet5_mnist"],
"femnist": ["mnist_cnn", "dev", "bhagoji", "dev_fc_intrinsic", "dev_intrinsic", "mnistcnn_intrinsic", "bhagoji_intrinsic", "lenet5_mnist"],
"cifar10": ["resnet18", "resnet32", "resnet44", "resnet56", "resnet110", "resnet18_v2", "resnet56_v2", "lenet5_cifar", "lenet5_intrinsic", "allcnn", "allcnn_intrinsic"]
}
return model_name in supported_types[dataset_name]
@staticmethod
def model_supports_weight_analysis(model_name):
return model_name not in ["dev_intrinsic", "dev_fc_intrinsic", "bhagoji_intrinsic", "mnistcnn_intrinsic", "allcnn", "allcnn_intrinsic"]
@staticmethod
def create_optimizer(optimizer_name, learning_rate, decay, steps_per_round):
"""Creates optimizer based on given parameters
Args:
optimizer_name (str): name of the optimizer
learning_rate (float|object): initial learning rate
decay (src.config.definitions.LearningDecay|None): type of decay
steps_per_round (int): number of optimizer steps per round
Returns:
keras optimizer
"""
if decay is not None:
lr_schedule = Model.current_lr(learning_rate, decay.type,
decay.decay_steps, decay.decay_rate, decay.decay_boundaries, decay.decay_values,
decay.step_epochs, steps_per_round)
else:
lr_schedule = learning_rate
if optimizer_name == 'Adam':
return tf.keras.optimizers.Adam(lr_schedule)
elif optimizer_name == 'SGD':
return tf.keras.optimizers.SGD(lr_schedule, 0.9)
raise Exception('Optimizer `%s` not supported.' % optimizer_name)
@staticmethod
def current_lr(learning_rate, decay_type, decay_steps, decay_rate, decay_boundaries, decay_values, steps_epoch, steps_per_batch):
# lr = learning_rate * \
# math.pow(decay_rate, math.floor(epoch / decay_steps))
# lr = learning_rate * \
# tf.pow(decay_rate, tf.cast(tf.floor(epoch / decay_steps), dtype=tf.float32))
# lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
# learning_rate,
# decay_steps=decay_steps,
# decay_rate=decay_rate,
# staircase=False)
steps_multiplier = 1
if steps_epoch:
steps_multiplier = steps_per_batch
if decay_type == 'exponential':
# exp
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
learning_rate,
decay_steps=decay_steps * steps_multiplier,
decay_rate=decay_rate,
staircase=False)
return lr_schedule
elif decay_type == 'boundaries':
values = [learning_rate * v for v in decay_values]
boundaries = [boundary * steps_multiplier for boundary in decay_boundaries]
lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries, values)
return lr_schedule
else:
return learning_rate
# if epoch > 300 * 2:
# learning_rate *= 1e-1
# if epoch > 250 * 2:
# learning_rate *= 1e-1
# if epoch > 200 * 2:
# learning_rate *= 1e-1
# print('Learning rate: ', lr)
# return lr_schedule
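# Illustrative usage sketch: building one of the supported model/optimizer pairs defined above.
# The model and dataset names follow the branches in create_model/model_supported; no decay is used.
if __name__ == "__main__":
    if Model.model_supported('mnist_cnn', 'mnist'):
        demo_model = Model.create_model('mnist_cnn')
        demo_opt = Model.create_optimizer('SGD', learning_rate=0.1, decay=None, steps_per_round=None)
        demo_model.compile(optimizer=demo_opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        demo_model.summary()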
| 11,849 | 48.170124 | 180 |
py
|
fl-analysis
|
fl-analysis-master/src/config_cli.py
|
from typing import Tuple, Any
import configargparse
import logging
import src.config as cnf
from src.config.definitions import Config
parser = configargparse.ArgumentParser()
parser.add('-c', '--config_filepath', required=True, help='Path to config file.')
# logging configuration
parser.add_argument(
'-d', '--debug',
help="Print debug statements",
action="store_const", dest="loglevel", const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
'-v', '--verbose',
help="Print verbose",
action="store_const", dest="loglevel", const=logging.INFO,
)
def get_config() -> Tuple[Config, Any]:
args = parser.parse_args()
config = cnf.load_config(args.config_filepath)
logging.basicConfig(level=args.loglevel)
    root = logging.getLogger()
    root.setLevel(args.loglevel)  # respect the -d/-v flags instead of forcing DEBUG
logging.info(config)
return config, args
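# Illustrative entry-point sketch: parsing a config the way the training scripts would.
# Assumes an invocation such as `python -m src.config_cli -c configs/example.yml`; the path is an assumption.
if __name__ == "__main__":
    cfg, cli_args = get_config()
    print(cfg)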
| 876 | 23.361111 | 81 |
py
|
fl-analysis
|
fl-analysis-master/src/client.py
|
import itertools
import random
import logging
from copy import deepcopy
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.callbacks import LearningRateScheduler
import src.prob_clip as prob_clip
from src.data.tf_data import Dataset
from src.error import ConfigurationError
from src.attack.attack import StealthAttack
from src.client_attacks import Attack
from src.data import image_augmentation
from src.learning_rate_decay import StepDecay
from src.loss import regularized_loss
from src.tf_model import Model
from tensorflow.python.keras.layers.convolutional import Conv2D
from tensorflow.python.keras.layers.core import Dense
class Client:
dataset: Dataset
def __init__(self, client_id, config, dataset, malicious):
"""
        :type config: src.config.definitions.ClientConfig
"""
self.id = client_id
self.config = config
self.dataset = dataset
self.malicious = malicious
self.attack_type = Attack(config.malicious.attack_type) \
if config.malicious is not None else None
self.weights = None
self.model = None # For memory optimization
self.benign_updates_this_round = None # Attacker full knowledge
self.global_trainable_weight = None
self.last_global_weights = None
self.last_update_weights = None
self.last_global_weights_server = None # Helper to always assume client get latest global weights
# print('num of params ', np.sum([np.prod(v.shape) for v in self.model.trainable_variables]))
self._dropout_mask = None
self.train_loss = tf.keras.metrics.Mean(name='train_loss')
self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
self.loss_object = None
self.honest_optimizer = self.create_honest_optimizer()
self.pgd_step_counter = 0
def set_dropout_mask(self, dropout_mask):
self._dropout_mask = dropout_mask
def _apply_dropout(self, grads):
"""Applies dropout if dropout mask is set.
Args:
grads (list): list of tensors that are modified inplace
"""
if self._dropout_mask is None:
return
for i in range(len(grads)):
grads[i] = grads[i] * self._dropout_mask[i]
def set_weights(self, weights):
self.weights = weights
def set_model(self, model):
self.model = model
def set_benign_updates_this_round(self, updates):
"""To establish whether the attacker has full knowledge this round"""
self.benign_updates_this_round = updates
def _compute_gradients_honest(self, tape, loss_value):
grads = tape.gradient(loss_value, self.model.trainable_variables)
self._apply_dropout(grads)
return grads
def _compute_gradients(self, tape, loss_value):
grads = tape.gradient(loss_value, self.model.trainable_variables)
self._apply_dropout(grads)
return grads
def apply_quantization(self, old_weights, new_weights):
if self.config.quantization is None:
return new_weights
update = [new_weights[i] - old_weights[i]
for i in range(len(old_weights))]
quantization = self.config.quantization
if quantization.type == 'deterministic':
update = prob_clip.clip(update, quantization.bits, quantization.frac, False)
elif quantization.type == 'probabilistic':
update = prob_clip.clip(update, quantization.bits, quantization.frac, True)
else:
raise Exception('Selected quantization method does not exist!')
return [old_weights[i] + update[i] for i in range(len(old_weights))]
def perform_attack(self):
try:
attack_config = self.config.malicious
from src import attack
cls = getattr(attack, attack_config.objective["name"])
attack: StealthAttack = cls()
attack.set_stealth_method(self.evasion_factory())
except Exception as e:
raise ConfigurationError("Invalid attack configuration", e)
args = attack_config.objective['args'].copy()
args['loss_object'] = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
args['optimizer'] = self.build_optimizer(args)
args.pop('learning_rate', None)
args.pop('reduce_lr', None)
args.pop('attacker_full_dataset', None)
malicious_weights = attack.generate(self.dataset, self.model, **args)
return malicious_weights
def build_optimizer(self, optimizer_config):
# return elaborate optimizer, potentially with stepdecay
opt = optimizer_config['optimizer']
lr = optimizer_config['learning_rate'] if 'learning_rate' in optimizer_config else None
step_decay = optimizer_config['step_decay'] if 'step_decay' in optimizer_config else None
if self.attack_type != Attack.UNTARGETED:
            # for untargeted attacks we don't have an eval set to measure success on
_, adv_success = self.eval_aux_test(tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False))
print(f"Adv success: {adv_success}")
if 'reduce_lr' in optimizer_config and optimizer_config['reduce_lr']:
if adv_success > 0.6:
lr = lr / 100
elif adv_success > 0.2:
lr = lr / 50
if opt == "Adam":
if lr is not None:
if step_decay is not None and step_decay:
decay = StepDecay(lr,
optimizer_config['num_epochs'] * optimizer_config['num_batch'])
optimizer_config['step_decay'] = decay
return tf.keras.optimizers.Adam(learning_rate=decay)
return tf.keras.optimizers.Adam(learning_rate=lr)
if opt == "SGD":
if lr is not None:
if step_decay is not None and step_decay:
decay = StepDecay(lr,
optimizer_config['num_epochs'] * optimizer_config['num_batch'])
optimizer_config['step_decay'] = decay
return tf.keras.optimizers.SGD(learning_rate=decay)
return tf.keras.optimizers.SGD(learning_rate=lr)
return tf.keras.optimizers.Adam()
def evasion_factory(self):
"""
:rtype: EvasionMethod|None
"""
attack_config = self.config.malicious
if attack_config.evasion is None:
return None
evasion_name = attack_config.evasion['name']
args = attack_config.evasion['args']
from src.attack import evasion
cls = getattr(evasion, evasion_name)
if evasion_name == 'NormBoundPGDEvasion':
return cls(old_weights=self.weights, benign_updates=self.benign_updates_this_round, **args)
elif evasion_name == 'NormBoundProbabilisticCheckingEvasion':
return cls(old_weights=self.weights, benign_updates=self.benign_updates_this_round, **args)
elif evasion_name == 'NeurotoxinEvasion':
return cls(old_weights=self.weights, last_round_weights=self.last_global_weights_server, benign_updates=self.benign_updates_this_round, **args)
elif evasion_name == 'TrimmedMeanEvasion':
assert self.benign_updates_this_round is not None, "Only full knowledge attack is supported at this moment"
return cls(benign_updates_this_round=self.benign_updates_this_round, **args)
else:
raise NotImplementedError(f"Evasion with name {evasion_name} not supported.")
@tf.function
def optimized_training(self, batch_x, batch_y):
"""Uses tf non-eager execution using graph"""
self.unoptimized_benign_training(batch_x, batch_y)
def unoptimized_benign_training(self, batch_x, batch_y):
with tf.GradientTape() as tape:
predictions = self.model(batch_x, training=True)
loss_value = self.loss_object(y_true=batch_y, y_pred=predictions)
reg = tf.reduce_sum(self.model.losses)
total_loss = loss_value + reg
grads = self._compute_gradients_honest(tape, total_loss)
self.honest_optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
self.train_loss(total_loss)
self.train_accuracy(batch_y, predictions)
def honest_training(self):
"""Performs local training"""
self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False) # Our model has a softmax layer!
num_iters = 0
# tboard_callback = tf.keras.callbacks.TensorBoard(log_dir='logdir',
# histogram_freq=1)
if self.config.optimized_training:
for i in range(self.config.benign_training.num_epochs):
# tf.print(self.honest_optimizer._decayed_lr(tf.float32))
for batch_id, (batch_x, batch_y) in enumerate(self.dataset.get_data()):
self.optimized_training(batch_x, batch_y)
current_lr = self.honest_optimizer._decayed_lr(var_dtype=tf.float32)
# print(f"Current lr: {current_lr}")
if self.config.debug_client_training:
print(f"Epoch {i}: Train loss={self.train_loss.result()}, acc={self.train_accuracy.result()}, lr={current_lr}", flush=True)
self.train_loss.reset_states()
self.train_accuracy.reset_states()
else:
self.honest_optimizer = self.create_honest_optimizer()
for i in range(self.config.benign_training.num_epochs):
for (batch_x, batch_y) in self.dataset.get_data():
# image_augmentation.debug(batch_x, batch_y)
self.unoptimized_benign_training(batch_x, batch_y)
self.honest_optimizer = None # release for memory reasons
# Set last weights as we might act malicious next round
if self.malicious:
self.last_global_weights = self.weights # First round, don't estimate
self.last_update_weights = self.model.get_weights()
return self.model.get_weights()
def deviate_max_norm_attack(self):
"""Builds byzantine attack that has worst case opposite of gradient"""
# Currently only l_inf is supported
assert self.benign_updates_this_round is not None, "Only full knowledge attack is supported at this moment"
aggregation = self.config['aggregator']
if aggregation == "FedAvg":
assert self.config["clip"] is not None
# Full knowledge
next_update = Client.average_weights(self.benign_updates_this_round)
clip_value = self.config["clip"]
new_weights_opposite_direction = [np.sign(layer) * -clip_value for layer in next_update]
return new_weights_opposite_direction
elif aggregation == "TrimmedMean":
constant_b = 2.0
accumulator = [np.zeros([*layer.shape, len(self.benign_updates_this_round)], layer.dtype)
for layer in self.benign_updates_this_round[0]]
for client in range(0, len(self.benign_updates_this_round)):
for layer in range(len(self.benign_updates_this_round[client])):
accumulator[layer][..., client] = self.benign_updates_this_round[client][layer]
# put the per-client layers in single np array
# next_update_check = Client.average_weights(self.benign_updates_this_round)
next_update = [np.mean(layer, -1) for layer in accumulator]
layer_max = [np.max(layer, -1) for layer in accumulator]
layer_min = [np.min(layer, -1) for layer in accumulator]
directions = [np.sign(layer) for layer in next_update]
new_weights = []
for signs, max, min in zip(directions, layer_max, layer_min):
max_interval = np.where(max > 0, (max, max * constant_b), (max, max / constant_b))
min_interval = np.where(min > 0, (min / constant_b, min), (constant_b * min, min))
intervals = np.where(signs < 0, max_interval, min_interval)
intervals = np.moveaxis(intervals, 0, -1)
randomness = np.random.sample(intervals.shape[0:-1])
weights = (intervals[..., 1] - intervals[..., 0]) * randomness + intervals[..., 0]
new_weights.append(weights)
return new_weights
else:
raise NotImplementedError("Aggregation method not supported by this attack.")
def add_noise(self, batch_x):
if self.config.gaussian_noise is None:
return batch_x
sigma = self.config.gaussian_noise
gauss = np.random.normal(0, sigma, batch_x.shape)
gauss = gauss.reshape(batch_x.shape).astype(batch_x.dtype)
noisy = np.clip(batch_x + gauss, a_min=0.0, a_max=1.0)
return noisy
# def contamination_attack(self, optimizer, loss_object):
# """This attack modifies only epsilon*n neurons.
#
# Inspired by: Diakonikolas, Ilias, et al. "Sever: A robust meta-algorithm for stochastic optimization. ICML 2019
#
# Warning: the convergence parameter is hard-codded as 0.01
# """
# assert self.malicious and self.config['contamination_model']
#
# # region contamination mask creation
# contamination_mask = [np.zeros_like(self.weights[i]) for i in range(len(self.weights))]
# layer_iter, layer_ind = 0, 0
# for ind in range(len(self.model.layers)):
# if type(self.model.layers[ind]) in [Conv2D, Dense]:
# elems = self.model.layers[ind].weights[0].shape[-1]
# elems_to_keep = int(elems * self.config['contamination_rate'][layer_iter])
# keep_inds = np.random.choice(elems, elems_to_keep, replace=False)
# contamination_mask[layer_ind][..., keep_inds] = 1 # weights
# contamination_mask[layer_ind + 1][keep_inds] = 1 # biases
#
# layer_iter += 1
# layer_ind += 2
# # endregion
#
# # backdoor with small noise
# batch_x = self.dataset.x_aux
# for local_epoch in range(100): # maximum number of local epochs
# with tf.GradientTape() as tape:
# # add a small noise to data samples
# loss_value = loss_object(y_true=self.dataset.mal_aux_labels,
# y_pred=self.model(self.add_noise(batch_x), training=True))
# if loss_value < 0.01:
# break
# grads = self._compute_gradients(tape, loss_value)
# # blackout gradients
# for i in range(len(grads)):
# grads[i] = grads[i] * contamination_mask[i]
#
# optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
#
# # boost weights
# new_weights = self.apply_attack(self.weights, self.model.get_weights())
# return new_weights
# def minimize_loss_attack(self, optimizer, loss_object, round):
# assert self.malicious
#
# mal_optimizer = self.create_malicious_optimizer()
# # refine on auxiliary dataset
# loss_value = 100
# epoch = 0
# while epoch < self.config['mal_num_epochs'] or loss_value > self.config['mal_target_loss']:
# for mal_batch_x, mal_batch_y in self.dataset.get_aux(self.config['mal_num_batch']):
# with tf.GradientTape() as tape:
# loss_value = loss_object(y_true=mal_batch_y,
# y_pred=self.model(self.add_noise(mal_batch_x), training=True))
# # if loss_value > 0.01:
# grads = self._compute_gradients(tape, loss_value)
# mal_optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
#
# self.update_weights_pgd()
#
# # print(f"Loss value mal {loss_value}")
# epoch += 1
#
# if epoch > self.config['mal_num_epochs_max']:
# logging.debug(f"Client {self.id}: Epoch break ({epoch})")
# break
#
# # boost weights
# new_weights = self.apply_attack(self.weights, self.model.get_weights())
# return new_weights
# def backdoor_stealth_attack(self, optimizer, loss_object, round):
# """Applies alternating minimization strategy, similar to bhagoji
#
# First, we train the honest model for one batch, and then the malicious samples, if the loss is still high.
# Iterates for as many benign batches exist.
#
# Note: Does not support the l2 norm constraint
# Note: Only supports boosting the full gradient, not only the malicious part.
# Note: Implemented as by Bhagoji. Trains the malicious samples with full batch size, this may not be desirable.
#
# """
#
# mal_optimizer = self.create_malicious_optimizer()
#
# loss_value_malicious = 100
# epoch = 0
# # current_weights = self.model.get_weights()
# # delta_mal_local = [np.zeros(w.shape) for w in current_weights]
# while epoch < self.config['mal_num_epochs'] or loss_value_malicious > self.config['mal_target_loss']:
#
# for (batch_x, batch_y) in self.dataset.get_data():
# with tf.GradientTape() as tape:
# pred = self.model(batch_x, training=True)
# pred_labels = np.argmax(pred, axis=1)
# loss_value = loss_object(y_true=batch_y, y_pred=pred)
# acc = np.mean(pred_labels == batch_y)
# logging.debug(f"Client {self.id}: Benign loss {loss_value} {acc}")
# grads = self._compute_gradients(tape, loss_value)
#
# optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
#
# # self.update_weights_pgd()
#
# # delta_benign = self.model.get_weights()
#
# for (x_aux, y_aux) in self.dataset.get_aux(self.config['mal_num_batch']):
# pred_mal = self.model(x_aux, training=False)
# loss_value_malicious = loss_object(y_true=y_aux,
# y_pred=pred_mal)
# if loss_value_malicious < self.config['mal_target_loss']:
# break
#
# with tf.GradientTape() as tape:
# pred_mal = self.model(x_aux, training=True)
# pred_mal_labels = np.argmax(pred_mal, axis=1)
# loss_value_malicious = loss_object(y_true=y_aux,
# y_pred=pred_mal)
# acc_mal = np.mean(pred_mal_labels == y_aux)
# self.debug(f"Mal loss {loss_value_malicious} {acc_mal}")
# grads = tape.gradient(loss_value_malicious, self.model.trainable_variables)
# mal_optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
#
# # end_weights = self.model.get_weights()
# # delta_mal = [delta_benign[i] - end_weights[i] for i in range(len(end_weights))]
# # delta_mal_local = [delta_mal_local[i] + delta_mal[i] for i in range(len(delta_mal))]
#
# if epoch > self.config['mal_num_epochs_max']:
# self.debug(f"Epoch break! {loss_value_malicious}")
# break
#
# epoch += 1
#
# # end_weights = self.model.get_weights()
# # new_weights = [end_weights[i] + (delta_mal_local[i] * (self.config['scale_attack_weight'] - 1)) for i in range(len(delta_mal_local))]
# new_weights = self.apply_attack(self.weights, self.model.get_weights())
# return new_weights
#
# def model_replacement_attack(self, optimizer, loss_object, round):
# # Note: Implemented as by `Can you really backdoor federated learning?` baseline attack
#
# poison_samples = self.config['poison_samples']
# mal_num_batch = self.config['mal_num_batch']
#
# # Uses custom StepDecay because we want to step more explicitly
# if self.config['mal_step_learning_rate']:
# step_decay = StepDecay(self.config['mal_learning_rate'], self.config['mal_num_epochs'] * mal_num_batch)
# mal_optimizer = Model.create_optimizer(self.config["optimizer"], step_decay, None, None, None, None, None)
# else:
# step_decay = None
# mal_optimizer = Model.create_optimizer(self.config["optimizer"], self.config['mal_learning_rate'], None,
# None, None, None, None)
# # loss_object = regularized_loss(self.model.layers, self.weights)
#
# loss_value_mal = 100
# for epoch in range(self.config['mal_num_epochs']):
# for batch_x, batch_y in self.dataset.get_data_with_aux(poison_samples, mal_num_batch): # 10 is ICML
# print(f"LR: {mal_optimizer._decayed_lr(var_dtype=tf.float32)}")
# # image_augmentation.debug(batch_x, batch_y)
#
# with tf.GradientTape() as tape:
# loss_value = loss_object(y_true=batch_y, y_pred=self.model(batch_x, training=True))
# # print(f"Loss: {loss_value}")
# grads = self._compute_gradients(tape, loss_value)
# mal_optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
#
# if step_decay is not None:
# step_decay.apply_step()
#
# self.update_weights_pgd()
#
# loss_value_mal, acc_mal = self.eval_aux_test(loss_object)
#
# # acc_nonmal = tf.reduce_mean((batch_y[21:] == tf.argmax(self.model(batch_x[21:], training=True), axis=1)))
# # preds = self.model(batch_x[21:], training=False).numpy().argmax(axis=1)
# # pred_inds = preds == batch_y[21:].numpy()
# # # print(f"Correct: {self.global_dataset.y_aux_test[pred_inds]} -> {preds[pred_inds]}")
# # acc_nonmal = np.mean(pred_inds)
# logging.debug(f"Client {self.id}: Loss {loss_value_mal} acc {acc_mal}")
#
# # if step_decay is not None:
# # if loss_value_mal < self.config['mal_target_loss']:
# # step_decay.mul = 0.01
# # else:
# # step_decay.mul = 1.0
#
# # if loss_value_mal < self.config['mal_target_loss']:
# # self.debug(f"Below target loss {loss_value_mal}")
# # break
#
# self.debug("Epoch")
#
# new_weights = self.apply_attack(self.weights, self.model.get_weights())
# return new_weights
def malicious_training(self, round):
assert self.malicious
optimizer = self.create_honest_optimizer()
if self.config.benign_training.regularization_rate is not None:
loss_object = regularized_loss(self.model.layers, self.weights, self.config.benign_training.regularization_rate)
else:
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False) # Our model has a softmax layer!
attack_type = self.attack_type
logging.info(f"Client {self.id}: Malicious training")
if attack_type == Attack.UNTARGETED:
new_weights = self.perform_attack()
elif attack_type == Attack.DEVIATE_MAX_NORM:
new_weights = self.deviate_max_norm_attack()
elif attack_type == Attack.BACKDOOR:
new_weights = self.perform_attack()
else:
raise Exception('Unknown type of attack!')
if self.config.malicious.estimate_other_updates:
# I guess new_weights should be updated based on this difference
new_weights = self.apply_estimation(self.weights, new_weights)
return new_weights
# @profile
def train(self, round):
"""Performs local training"""
self.train_loss.reset_states()
self.train_accuracy.reset_states()
self.pgd_step_counter = 0
self.model.set_weights(self.weights)
# self.global_trainable_weight = [w.numpy() for w in self.model.trainable_weights]
if self.acting_malicious(round):
new_weights = self.malicious_training(round)
else:
new_weights = self.honest_training()
self.global_trainable_weight = None # Release
# loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
# from_logits=False) # Our model has a softmax layer!
# self.eval_train(loss_object)
new_weights = self.apply_defense(self.weights, new_weights)
new_weights = self.apply_quantization(self.weights, new_weights)
# print("Post clip")
# self.model.set_weights(new_weights)
# self.eval_train(loss_object)
# print(f"Train loss: {self.train_loss.result()}, acc: {self.train_accuracy.result()}")
self.model = None # Release
self.weights = new_weights
def apply_estimation(self, old_weights, new_weights):
if self.last_global_weights is None:
self.last_global_weights = old_weights # First round, don't estimate
self.last_update_weights = new_weights
return new_weights
logging.info(f"Client {self.id}: Global model estimation")
# Assume updates of other will be the same next round
other_updates = [(old_weights[i] - self.last_update_weights[i]) for i in range(len(old_weights))]
new_weights_with_diff = [new_weights[i] - other_updates[i] for i in range(len(old_weights))]
self.last_global_weights = old_weights # First round, don't estimate
self.last_update_weights = new_weights
return new_weights_with_diff
def apply_attack(self, old_weights, new_weights):
"""
Applies attacks based on configuration
:param old_weights: the weights of the model before training. Will be used to calculate the delta (malicious) weights
:param new_weights: the new weights after training
        :return: new weights after applying the attack (e.g. boosting)
"""
if self.config['scale_attack']:
return self._boost_weights(old_weights, new_weights)
return new_weights
def _replace_model(self, old_weights, new_weights):
# We could try to implement this
return None
def _boost_weights(self, old_weights, new_weights):
logging.info(f"Client {self.id}: Boosting weights with {self.config['scale_attack_weight']}")
delta_weights = [(new_weights[i] - old_weights[i]) * self.config['scale_attack_weight']
for i in range(len(old_weights))]
return [old_weights[i] + delta_weights[i] for i in range(len(old_weights))]
def apply_defense(self, old_weights, new_weights):
"""
        Applies client-side norm clipping defenses based on configuration
        :param old_weights: global weights before local training
        :param new_weights: local weights after training (and possibly attack)
        :return: new weights with the clipped update applied
"""
assert old_weights is not None, "Old weights can't be none"
assert new_weights is not None, "New weights can't be none"
delta_weights = [new_weights[i] - old_weights[i]
for i in range(len(old_weights))]
# clip_layers = self.config['clip_layers'] if self.config['clip_layers'] != [] else range(len(old_weights))
clip_layers = range(len(old_weights))
clip = self.config.clip
if clip is None:
return new_weights
if clip.type == "linf":
if clip.probability is None:
delta_weights = [np.clip(delta_weights[i], -clip.value, clip.value) if i in clip_layers else delta_weights[i]
for i in range(len(delta_weights))]
# # Addition, clip layers less aggressively
# delta_weights = [np.clip(delta_weights[i], -clip * 5, clip * 5) if i not in clip_layers else delta_weights[i]
# for i in range(len(delta_weights))]
else:
delta_weights = self.random_clip_l0(delta_weights, clip.value, clip.probability, clip_layers)
if clip.type == "l2":
delta_weights = self.clip_l2(delta_weights, clip.value, clip_layers)
new_weights = [old_weights[i] + delta_weights[i] for i in range(len(old_weights))]
return new_weights
def random_clip_l0(self, delta_weights, clip, prob, clip_layers):
"""
        Randomly clips entries of the update to the l_inf bound
        :param delta_weights: weights to clip
        :param clip: clip value
        :param prob: probability with which an entry is clipped
        :param clip_layers: what layers to apply clipping to
        :return: randomly clipped weights
"""
new_weights = [
[np.clip(col_weights[i], -clip, clip) if i in clip_layers and random.random() < prob else col_weights[i]
for i in range(len(col_weights))] for col_weights in delta_weights]
return new_weights
def clip_l2(self, delta_weights, l2, clip_layers):
"""
        Scales the update into an l2 ball; the norm is computed jointly over the selected layers.
        :param delta_weights: current weight update
        :param l2: l2 bound
        :param clip_layers: what layers to apply clipping to
        :return: the (possibly down-scaled) weight update
"""
l2_norm_tensor = tf.constant(l2)
layers_to_clip = [tf.reshape(delta_weights[i], [-1]) for i in range(len(delta_weights)) if
i in clip_layers] # for norm calculation
norm = max(tf.norm(tf.concat(layers_to_clip, axis=0)), 0.00001)
# print(f"Norm: {norm}")
multiply = min((l2_norm_tensor / norm).numpy(), 1.0)
return [delta_weights[i] * multiply if i in clip_layers else delta_weights[i] for i in
range(len(delta_weights))]
def clip_l2_per_layer(self, delta_weights, l2, clip_layers):
"""
@deprecated
Calculates the norm per layer. For all layers individually
:param delta_weights: current weight update
:param l2: l2 bound
:param clip_layers: what layers to apply clipping to
:return:
"""
norm = [tf.norm(delta_weights[i]) if i in clip_layers else tf.constant(l2) for i in range(len(delta_weights))]
multiply = [tf.constant(l2) / norm[i] for i in range(len(norm))]
return [delta_weights[i] * multiply[i] for i in range(len(delta_weights))]
def acting_malicious(self, round):
return self.malicious and self.config.malicious.attack_start <= round <= self.config.malicious.attack_stop
# def apply_pgd_weights(self, old_weights, new_weights):
# pgd = self.config['pgd']
# if pgd is not None:
#
# pgd_constraint = self.config['pgd_constraint'] / self.config['scale_attack_weight'] \
# if self.malicious and self.config['scale_attack'] \
# else self.config['pgd_constraint']
#
# self.debug(f"Applying constraint {pgd} with value {pgd_constraint}")
#
# if pgd == 'l_inf':
# new_weights = self.apply_defense(old_weights, new_weights, pgd_constraint, None)
# elif pgd == 'l2':
# new_weights = self.apply_defense(old_weights, new_weights, None, pgd_constraint)
# else:
# raise Exception('PGD type not supported')
# return new_weights
# def update_weights_pgd(self):
# self.pgd_step_counter += 1
# if self.pgd_step_counter % self.config['pgd_clip_frequency'] != 0:
# # not yet time to clip
# return
#
# new_weights = self.apply_pgd_weights(self.weights, self.model.get_weights())
# self.model.set_weights(new_weights)
def eval_train(self, loss_object):
total_loss = 0.0
batch_count = 0.0
for batch_x, batch_y in self.dataset.get_data():
loss_value = loss_object(y_true=batch_y, y_pred=self.model(batch_x, training=False))
total_loss += loss_value
batch_count += 1
loss = total_loss / batch_count
logging.debug(f"Client {self.id}: Training loss {loss}")
def eval_aux_test(self, loss_object):
for batch_x, batch_y in self.dataset.get_aux_test_generator(1):
preds = self.model(batch_x, training=False)
loss_value = loss_object(y_true=batch_y, y_pred=preds)
pred_inds = preds.numpy().argmax(axis=1) == batch_y
adv_success = np.mean(pred_inds)
return loss_value, adv_success
def create_honest_optimizer(self):
training = self.config.benign_training
num_batches = self.dataset.x_train.shape[0] / training.batch_size
steps_per_round = num_batches * training.num_epochs
return Model.create_optimizer(training.optimizer, training.learning_rate,
training.decay, steps_per_round)
def debug(self, v):
logging.debug(f"Client {self.id}: {v}")
def info(self, v):
logging.info(f"Client {self.id}: {v}")
@staticmethod
def average_weights(client_weight_list):
"""Procedure for averaging client weights"""
new_weights = deepcopy(client_weight_list[0])
# return new_weights
for client in range(1, len(client_weight_list)):
for layer in range(len(client_weight_list[client])):
new_weights[layer] = new_weights[layer] + client_weight_list[client][layer]
for layer in range(len(new_weights)):
new_weights[layer] = new_weights[layer] / len(client_weight_list)
return new_weights
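# Illustrative sketch of the averaging helper above on two fake "clients", each holding a
# two-layer weight list; the toy shapes and values are assumptions.
if __name__ == "__main__":
    client_a = [np.ones((2, 2)), np.zeros(3)]
    client_b = [3 * np.ones((2, 2)), np.ones(3)]
    averaged = Client.average_weights([client_a, client_b])
    print(averaged[0])  # element-wise mean: all 2.0
    print(averaged[1])  # element-wise mean: all 0.5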
| 34,219 | 43.849279 | 155 |
py
|