repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
DSLA-DSLA
DSLA-DSLA/mmdet/models/necks/nas_fpn.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell from mmcv.runner import BaseModule, ModuleList from ..builder import NECKS @NECKS.register_module() class NASFPN(BaseModule): """NAS-FPN. Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection <https://arxiv.org/abs/1904.07392>`_ Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale) num_outs (int): Number of output scales. stack_times (int): The number of times the pyramid architecture will be stacked. start_level (int): Index of the start input backbone level used to build the feature pyramid. Default: 0. end_level (int): Index of the end input backbone level (exclusive) to build the feature pyramid. Default: -1, which means the last level. add_extra_convs (bool): It decides whether to add conv layers on top of the original feature maps. Default to False. If True, its actual mode is specified by `extra_convs_on_inputs`. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, out_channels, num_outs, stack_times, start_level=0, end_level=-1, add_extra_convs=False, norm_cfg=None, init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')): super(NASFPN, self).__init__(init_cfg) assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) # num of input feature levels self.num_outs = num_outs # num of output feature levels self.stack_times = stack_times self.norm_cfg = norm_cfg if end_level == -1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level < inputs, no extra level is allowed self.backbone_end_level = end_level assert end_level <= len(in_channels) assert num_outs == end_level - start_level self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs # add lateral connections self.lateral_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) self.lateral_convs.append(l_conv) # add extra downsample layers (stride-2 pooling or conv) extra_levels = num_outs - self.backbone_end_level + self.start_level self.extra_downsamples = nn.ModuleList() for i in range(extra_levels): extra_conv = ConvModule( out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) self.extra_downsamples.append( nn.Sequential(extra_conv, nn.MaxPool2d(2, 2))) # add NAS FPN connections self.fpn_stages = ModuleList() for _ in range(self.stack_times): stage = nn.ModuleDict() # gp(p6, p4) -> p4_1 stage['gp_64_4'] = GlobalPoolingCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p4_1, p4) -> p4_2 stage['sum_44_4'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p4_2, p3) -> p3_out stage['sum_43_3'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p3_out, p4_2) -> p4_out stage['sum_34_4'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p5, gp(p4_out, p3_out)) -> p5_out stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False) stage['sum_55_5'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p7, gp(p5_out, p4_2)) -> 
p7_out stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False) stage['sum_77_7'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # gp(p7_out, p5_out) -> p6_out stage['gp_75_6'] = GlobalPoolingCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) self.fpn_stages.append(stage) def forward(self, inputs): """Forward function.""" # build P3-P5 feats = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build P6-P7 on top of P5 for downsample in self.extra_downsamples: feats.append(downsample(feats[-1])) p3, p4, p5, p6, p7 = feats for stage in self.fpn_stages: # gp(p6, p4) -> p4_1 p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:]) # sum(p4_1, p4) -> p4_2 p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:]) # sum(p4_2, p3) -> p3_out p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:]) # sum(p3_out, p4_2) -> p4_out p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:]) # sum(p5, gp(p4_out, p3_out)) -> p5_out p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:]) p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:]) # sum(p7, gp(p5_out, p4_2)) -> p7_out p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:]) p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:]) # gp(p7_out, p5_out) -> p6_out p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:]) return p3, p4, p5, p6, p7
6,577
40.371069
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/necks/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .bfp import BFP from .channel_mapper import ChannelMapper from .ct_resnet_neck import CTResNetNeck from .dilated_encoder import DilatedEncoder from .fpg import FPG from .fpn import FPN from .fpn_carafe import FPN_CARAFE from .hrfpn import HRFPN from .nas_fpn import NASFPN from .nasfcos_fpn import NASFCOS_FPN from .pafpn import PAFPN from .rfp import RFP from .ssd_neck import SSDNeck from .yolo_neck import YOLOV3Neck from .yolox_pafpn import YOLOXPAFPN __all__ = [ 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN', 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder', 'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN' ]
710
29.913043
76
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/necks/bfp.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.cnn.bricks import NonLocal2d from mmcv.runner import BaseModule from ..builder import NECKS @NECKS.register_module() class BFP(BaseModule): """BFP (Balanced Feature Pyramids) BFP takes multi-level features as inputs and gather them into a single one, then refine the gathered feature and scatter the refined results to multi-level features. This module is used in Libra R-CNN (CVPR 2019), see the paper `Libra R-CNN: Towards Balanced Learning for Object Detection <https://arxiv.org/abs/1904.02701>`_ for details. Args: in_channels (int): Number of input channels (feature maps of all levels should have the same channels). num_levels (int): Number of input feature levels. conv_cfg (dict): The config dict for convolution layers. norm_cfg (dict): The config dict for normalization layers. refine_level (int): Index of integration and refine level of BSF in multi-level features from bottom to top. refine_type (str): Type of the refine op, currently support [None, 'conv', 'non_local']. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, num_levels, refine_level=2, refine_type=None, conv_cfg=None, norm_cfg=None, init_cfg=dict( type='Xavier', layer='Conv2d', distribution='uniform')): super(BFP, self).__init__(init_cfg) assert refine_type in [None, 'conv', 'non_local'] self.in_channels = in_channels self.num_levels = num_levels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.refine_level = refine_level self.refine_type = refine_type assert 0 <= self.refine_level < self.num_levels if self.refine_type == 'conv': self.refine = ConvModule( self.in_channels, self.in_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) elif self.refine_type == 'non_local': self.refine = NonLocal2d( self.in_channels, reduction=1, use_scale=False, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) def forward(self, inputs): """Forward function.""" assert len(inputs) == self.num_levels # step 1: gather multi-level features by resize and average feats = [] gather_size = inputs[self.refine_level].size()[2:] for i in range(self.num_levels): if i < self.refine_level: gathered = F.adaptive_max_pool2d( inputs[i], output_size=gather_size) else: gathered = F.interpolate( inputs[i], size=gather_size, mode='nearest') feats.append(gathered) bsf = sum(feats) / len(feats) # step 2: refine gathered features if self.refine_type is not None: bsf = self.refine(bsf) # step 3: scatter refined features to multi-levels by a residual path outs = [] for i in range(self.num_levels): out_size = inputs[i].size()[2:] if i < self.refine_level: residual = F.interpolate(bsf, size=out_size, mode='nearest') else: residual = F.adaptive_max_pool2d(bsf, output_size=out_size) outs.append(residual + inputs[i]) return tuple(outs)
3,777
35.679612
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/necks/yolo_neck.py
# Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) 2019 Western Digital Corporation or its affiliates. import torch import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from ..builder import NECKS class DetectionBlock(BaseModule): """Detection block in YOLO neck. Let out_channels = n, the DetectionBlock contains: Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer. The first 6 ConvLayers are formed the following way: 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n. The Conv2D layer is 1x1x255. Some block will have branch after the fifth ConvLayer. The input channel is arbitrary (in_channels) Args: in_channels (int): The number of input channels. out_channels (int): The number of output channels. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None): super(DetectionBlock, self).__init__(init_cfg) double_out_channels = out_channels * 2 # shortcut cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg) self.conv2 = ConvModule( out_channels, double_out_channels, 3, padding=1, **cfg) self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg) self.conv4 = ConvModule( out_channels, double_out_channels, 3, padding=1, **cfg) self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg) def forward(self, x): tmp = self.conv1(x) tmp = self.conv2(tmp) tmp = self.conv3(tmp) tmp = self.conv4(tmp) out = self.conv5(tmp) return out @NECKS.register_module() class YOLOV3Neck(BaseModule): """The neck of YOLOV3. It can be treated as a simplified version of FPN. It will take the result from Darknet backbone and do some upsampling and concatenation. It will finally output the detection result. Note: The input feats should be from top to bottom. i.e., from high-lvl to low-lvl But YOLOV3Neck will process them in reversed order. i.e., from bottom (high-lvl) to top (low-lvl) Args: num_scales (int): The number of scales / stages. in_channels (List[int]): The number of input channels per scale. out_channels (List[int]): The number of output channels per scale. conv_cfg (dict, optional): Config dict for convolution layer. Default: None. norm_cfg (dict, optional): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict, optional): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_scales, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None): super(YOLOV3Neck, self).__init__(init_cfg) assert (num_scales == len(in_channels) == len(out_channels)) self.num_scales = num_scales self.in_channels = in_channels self.out_channels = out_channels # shortcut cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) # To support arbitrary scales, the code looks awful, but it works. # Better solution is welcomed. 
self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg) for i in range(1, self.num_scales): in_c, out_c = self.in_channels[i], self.out_channels[i] inter_c = out_channels[i - 1] self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg)) # in_c + out_c : High-lvl feats will be cat with low-lvl feats self.add_module(f'detect{i+1}', DetectionBlock(in_c + out_c, out_c, **cfg)) def forward(self, feats): assert len(feats) == self.num_scales # processed from bottom (high-lvl) to top (low-lvl) outs = [] out = self.detect1(feats[-1]) outs.append(out) for i, x in enumerate(reversed(feats[:-1])): conv = getattr(self, f'conv{i+1}') tmp = conv(out) # Cat with low-lvl feats tmp = F.interpolate(tmp, scale_factor=2) tmp = torch.cat((tmp, x), 1) detect = getattr(self, f'detect{i+2}') out = detect(tmp) outs.append(out) return tuple(outs)
5,431
37.524823
77
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/necks/channel_mapper.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from ..builder import NECKS @NECKS.register_module() class ChannelMapper(BaseModule): r"""Channel Mapper to reduce/increase channels of backbone features. This is used to reduce/increase channels of backbone features. Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale). kernel_size (int, optional): kernel_size for reducing channels (used at each scale). Default: 3. conv_cfg (dict, optional): Config dict for convolution layer. Default: None. norm_cfg (dict, optional): Config dict for normalization layer. Default: None. act_cfg (dict, optional): Config dict for activation layer in ConvModule. Default: dict(type='ReLU'). num_outs (int, optional): Number of output feature maps. There would be extra_convs when num_outs larger than the length of in_channels. init_cfg (dict or list[dict], optional): Initialization config dict. Example: >>> import torch >>> in_channels = [2, 3, 5, 7] >>> scales = [340, 170, 84, 43] >>> inputs = [torch.rand(1, c, s, s) ... for c, s in zip(in_channels, scales)] >>> self = ChannelMapper(in_channels, 11, 3).eval() >>> outputs = self.forward(inputs) >>> for i in range(len(outputs)): ... print(f'outputs[{i}].shape = {outputs[i].shape}') outputs[0].shape = torch.Size([1, 11, 340, 340]) outputs[1].shape = torch.Size([1, 11, 170, 170]) outputs[2].shape = torch.Size([1, 11, 84, 84]) outputs[3].shape = torch.Size([1, 11, 43, 43]) """ def __init__(self, in_channels, out_channels, kernel_size=3, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), num_outs=None, init_cfg=dict( type='Xavier', layer='Conv2d', distribution='uniform')): super(ChannelMapper, self).__init__(init_cfg) assert isinstance(in_channels, list) self.extra_convs = None if num_outs is None: num_outs = len(in_channels) self.convs = nn.ModuleList() for in_channel in in_channels: self.convs.append( ConvModule( in_channel, out_channels, kernel_size, padding=(kernel_size - 1) // 2, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) if num_outs > len(in_channels): self.extra_convs = nn.ModuleList() for i in range(len(in_channels), num_outs): if i == len(in_channels): in_channel = in_channels[-1] else: in_channel = out_channels self.extra_convs.append( ConvModule( in_channel, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.convs) outs = [self.convs[i](inputs[i]) for i in range(len(inputs))] if self.extra_convs: for i in range(len(self.extra_convs)): if i == 0: outs.append(self.extra_convs[0](inputs[-1])) else: outs.append(self.extra_convs[i](outs[-1])) return tuple(outs)
3,975
38.366337
77
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/necks/hrfpn.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from torch.utils.checkpoint import checkpoint from ..builder import NECKS @NECKS.register_module() class HRFPN(BaseModule): """HRFPN (High Resolution Feature Pyramids) paper: `High-Resolution Representations for Labeling Pixels and Regions <https://arxiv.org/abs/1904.04514>`_. Args: in_channels (list): number of channels for each branch. out_channels (int): output channels of feature pyramids. num_outs (int): number of output stages. pooling_type (str): pooling for generating feature pyramids from {MAX, AVG}. conv_cfg (dict): dictionary to construct and config conv layer. norm_cfg (dict): dictionary to construct and config norm layer. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. stride (int): stride of 3x3 convolutional layers init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, out_channels, num_outs=5, pooling_type='AVG', conv_cfg=None, norm_cfg=None, with_cp=False, stride=1, init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')): super(HRFPN, self).__init__(init_cfg) assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.with_cp = with_cp self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reduction_conv = ConvModule( sum(in_channels), out_channels, kernel_size=1, conv_cfg=self.conv_cfg, act_cfg=None) self.fpn_convs = nn.ModuleList() for i in range(self.num_outs): self.fpn_convs.append( ConvModule( out_channels, out_channels, kernel_size=3, padding=1, stride=stride, conv_cfg=self.conv_cfg, act_cfg=None)) if pooling_type == 'MAX': self.pooling = F.max_pool2d else: self.pooling = F.avg_pool2d def forward(self, inputs): """Forward function.""" assert len(inputs) == self.num_ins outs = [inputs[0]] for i in range(1, self.num_ins): outs.append( F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear')) out = torch.cat(outs, dim=1) if out.requires_grad and self.with_cp: out = checkpoint(self.reduction_conv, out) else: out = self.reduction_conv(out) outs = [out] for i in range(1, self.num_outs): outs.append(self.pooling(out, kernel_size=2**i, stride=2**i)) outputs = [] for i in range(self.num_outs): if outs[i].requires_grad and self.with_cp: tmp_out = checkpoint(self.fpn_convs[i], outs[i]) else: tmp_out = self.fpn_convs[i](outs[i]) outputs.append(tmp_out) return tuple(outputs)
3,509
33.752475
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/nasfcos_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmdet.models.dense_heads.fcos_head import FCOSHead from ..builder import HEADS @HEADS.register_module() class NASFCOSHead(FCOSHead): """Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_. It is quite similar with FCOS head, except for the searched structure of classification branch and bbox regression branch, where a structure of "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead. """ def __init__(self, *args, init_cfg=None, **kwargs): if init_cfg is None: init_cfg = [ dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']), dict( type='Normal', std=0.01, override=[ dict(name='conv_reg'), dict(name='conv_centerness'), dict( name='conv_cls', type='Normal', std=0.01, bias_prob=0.01) ]), ] super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" dconv3x3_config = dict( type='DCNv2', kernel_size=3, use_bias=True, deform_groups=2, padding=1) conv3x3_config = dict(type='Conv', kernel_size=3, padding=1) conv1x1_config = dict(type='Conv', kernel_size=1) self.arch_config = [ dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config ] self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i, op_ in enumerate(self.arch_config): op = copy.deepcopy(op_) chn = self.in_channels if i == 0 else self.feat_channels assert isinstance(op, dict) use_bias = op.pop('use_bias', False) padding = op.pop('padding', 0) kernel_size = op.pop('kernel_size') module = ConvModule( chn, self.feat_channels, kernel_size, stride=1, padding=padding, norm_cfg=self.norm_cfg, bias=use_bias, conv_cfg=op) self.cls_convs.append(copy.deepcopy(module)) self.reg_convs.append(copy.deepcopy(module)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
2,908
34.91358
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/reppoints_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import DeformConv2d from mmdet.core import (build_assigner, build_sampler, images_to_levels, multi_apply, unmap) from mmdet.core.anchor.point_generator import MlvlPointGenerator from mmdet.core.utils import filter_scores_and_topk from ..builder import HEADS, build_loss from .anchor_free_head import AnchorFreeHead @HEADS.register_module() class RepPointsHead(AnchorFreeHead): """RepPoint head. Args: point_feat_channels (int): Number of channels of points features. gradient_mul (float): The multiplier to gradients from points refinement and recognition. point_strides (Iterable): points strides. point_base_scale (int): bbox scale for assigning labels. loss_cls (dict): Config of classification loss. loss_bbox_init (dict): Config of initial points loss. loss_bbox_refine (dict): Config of points loss in refinement. use_grid_points (bool): If we use bounding box representation, the reppoints is represented as grid points on the bounding box. center_init (bool): Whether to use center point assignment. transform_method (str): The methods to transform RepPoints to bbox. init_cfg (dict or list[dict], optional): Initialization config dict. """ # noqa: W605 def __init__(self, num_classes, in_channels, point_feat_channels=256, num_points=9, gradient_mul=0.1, point_strides=[8, 16, 32, 64, 128], point_base_scale=4, loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_init=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5), loss_bbox_refine=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), use_grid_points=False, center_init=True, transform_method='moment', moment_mul=0.01, init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='reppoints_cls_out', std=0.01, bias_prob=0.01)), **kwargs): self.num_points = num_points self.point_feat_channels = point_feat_channels self.use_grid_points = use_grid_points self.center_init = center_init # we use deform conv to extract points features self.dcn_kernel = int(np.sqrt(num_points)) self.dcn_pad = int((self.dcn_kernel - 1) / 2) assert self.dcn_kernel * self.dcn_kernel == num_points, \ 'The points number should be a square number.' assert self.dcn_kernel % 2 == 1, \ 'The points number should be an odd square number.' dcn_base = np.arange(-self.dcn_pad, self.dcn_pad + 1).astype(np.float64) dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) dcn_base_x = np.tile(dcn_base, self.dcn_kernel) dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( (-1)) self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) super().__init__( num_classes, in_channels, loss_cls=loss_cls, init_cfg=init_cfg, **kwargs) self.gradient_mul = gradient_mul self.point_base_scale = point_base_scale self.point_strides = point_strides self.prior_generator = MlvlPointGenerator( self.point_strides, offset=0.) 
self.sampling = loss_cls['type'] not in ['FocalLoss'] if self.train_cfg: self.init_assigner = build_assigner(self.train_cfg.init.assigner) self.refine_assigner = build_assigner( self.train_cfg.refine.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.transform_method = transform_method if self.transform_method == 'moment': self.moment_transfer = nn.Parameter( data=torch.zeros(2), requires_grad=True) self.moment_mul = moment_mul self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = self.num_classes else: self.cls_out_channels = self.num_classes + 1 self.loss_bbox_init = build_loss(loss_bbox_init) self.loss_bbox_refine = build_loss(loss_bbox_refine) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points self.reppoints_cls_conv = DeformConv2d(self.feat_channels, self.point_feat_channels, self.dcn_kernel, 1, self.dcn_pad) self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels, self.cls_out_channels, 1, 1, 0) self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels, self.point_feat_channels, 3, 1, 1) self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels, pts_out_dim, 1, 1, 0) self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels, self.point_feat_channels, self.dcn_kernel, 1, self.dcn_pad) self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels, pts_out_dim, 1, 1, 0) def points2bbox(self, pts, y_first=True): """Converting the points set into bounding box. :param pts: the input points sets (fields), each points set (fields) is represented as 2n scalar. :param y_first: if y_first=True, the point set is represented as [y1, x1, y2, x2 ... yn, xn], otherwise the point set is represented as [x1, y1, x2, y2 ... xn, yn]. :return: each points set is converting to a bbox [x1, y1, x2, y2]. """ pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:]) pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1, ...] pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0, ...] if self.transform_method == 'minmax': bbox_left = pts_x.min(dim=1, keepdim=True)[0] bbox_right = pts_x.max(dim=1, keepdim=True)[0] bbox_up = pts_y.min(dim=1, keepdim=True)[0] bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], dim=1) elif self.transform_method == 'partial_minmax': pts_y = pts_y[:, :4, ...] pts_x = pts_x[:, :4, ...] 
bbox_left = pts_x.min(dim=1, keepdim=True)[0] bbox_right = pts_x.max(dim=1, keepdim=True)[0] bbox_up = pts_y.min(dim=1, keepdim=True)[0] bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], dim=1) elif self.transform_method == 'moment': pts_y_mean = pts_y.mean(dim=1, keepdim=True) pts_x_mean = pts_x.mean(dim=1, keepdim=True) pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True) pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True) moment_transfer = (self.moment_transfer * self.moment_mul) + ( self.moment_transfer.detach() * (1 - self.moment_mul)) moment_width_transfer = moment_transfer[0] moment_height_transfer = moment_transfer[1] half_width = pts_x_std * torch.exp(moment_width_transfer) half_height = pts_y_std * torch.exp(moment_height_transfer) bbox = torch.cat([ pts_x_mean - half_width, pts_y_mean - half_height, pts_x_mean + half_width, pts_y_mean + half_height ], dim=1) else: raise NotImplementedError return bbox def gen_grid_from_reg(self, reg, previous_boxes): """Base on the previous bboxes and regression values, we compute the regressed bboxes and generate the grids on the bboxes. :param reg: the regression value to previous bboxes. :param previous_boxes: previous bboxes. :return: generate grids on the regressed bboxes. """ b, _, h, w = reg.shape bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2. bwh = (previous_boxes[:, 2:, ...] - previous_boxes[:, :2, ...]).clamp(min=1e-6) grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp( reg[:, 2:, ...]) grid_wh = bwh * torch.exp(reg[:, 2:, ...]) grid_left = grid_topleft[:, [0], ...] grid_top = grid_topleft[:, [1], ...] grid_width = grid_wh[:, [0], ...] grid_height = grid_wh[:, [1], ...] intervel = torch.linspace(0., 1., self.dcn_kernel).view( 1, self.dcn_kernel, 1, 1).type_as(reg) grid_x = grid_left + grid_width * intervel grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1) grid_x = grid_x.view(b, -1, h, w) grid_y = grid_top + grid_height * intervel grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1) grid_y = grid_y.view(b, -1, h, w) grid_yx = torch.stack([grid_y, grid_x], dim=2) grid_yx = grid_yx.view(b, -1, h, w) regressed_bbox = torch.cat([ grid_left, grid_top, grid_left + grid_width, grid_top + grid_height ], 1) return grid_yx, regressed_bbox def forward(self, feats): return multi_apply(self.forward_single, feats) def forward_single(self, x): """Forward feature map of a single FPN level.""" dcn_base_offset = self.dcn_base_offset.type_as(x) # If we use center_init, the initial reppoints is from center points. # If we use bounding bbox representation, the initial reppoints is # from regular grid placed on a pre-defined bbox. 
if self.use_grid_points or not self.center_init: scale = self.point_base_scale / 2 points_init = dcn_base_offset / dcn_base_offset.max() * scale bbox_init = x.new_tensor([-scale, -scale, scale, scale]).view(1, 4, 1, 1) else: points_init = 0 cls_feat = x pts_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: pts_feat = reg_conv(pts_feat) # initialize reppoints pts_out_init = self.reppoints_pts_init_out( self.relu(self.reppoints_pts_init_conv(pts_feat))) if self.use_grid_points: pts_out_init, bbox_out_init = self.gen_grid_from_reg( pts_out_init, bbox_init.detach()) else: pts_out_init = pts_out_init + points_init # refine and classify reppoints pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach( ) + self.gradient_mul * pts_out_init dcn_offset = pts_out_init_grad_mul - dcn_base_offset cls_out = self.reppoints_cls_out( self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset))) pts_out_refine = self.reppoints_pts_refine_out( self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset))) if self.use_grid_points: pts_out_refine, bbox_out_refine = self.gen_grid_from_reg( pts_out_refine, bbox_out_init.detach()) else: pts_out_refine = pts_out_refine + pts_out_init.detach() if self.training: return cls_out, pts_out_init, pts_out_refine else: return cls_out, self.points2bbox(pts_out_refine) def get_points(self, featmap_sizes, img_metas, device): """Get points according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. Returns: tuple: points of each image, valid flags of each image """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # points center for one time multi_level_points = self.prior_generator.grid_priors( featmap_sizes, device=device, with_stride=True) points_list = [[point.clone() for point in multi_level_points] for _ in range(num_imgs)] # for each image, we compute valid flags of multi level grids valid_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = self.prior_generator.valid_flags( featmap_sizes, img_meta['pad_shape']) valid_flag_list.append(multi_level_flags) return points_list, valid_flag_list def centers_to_bboxes(self, point_list): """Get bboxes according to center points. Only used in :class:`MaxIoUAssigner`. 
""" bbox_list = [] for i_img, point in enumerate(point_list): bbox = [] for i_lvl in range(len(self.point_strides)): scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5 bbox_shift = torch.Tensor([-scale, -scale, scale, scale]).view(1, 4).type_as(point[0]) bbox_center = torch.cat( [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1) bbox.append(bbox_center + bbox_shift) bbox_list.append(bbox) return bbox_list def offset_to_pts(self, center_list, pred_list): """Change from point offset to point coordinate.""" pts_list = [] for i_lvl in range(len(self.point_strides)): pts_lvl = [] for i_img in range(len(center_list)): pts_center = center_list[i_img][i_lvl][:, :2].repeat( 1, self.num_points) pts_shift = pred_list[i_lvl][i_img] yx_pts_shift = pts_shift.permute(1, 2, 0).view( -1, 2 * self.num_points) y_pts_shift = yx_pts_shift[..., 0::2] x_pts_shift = yx_pts_shift[..., 1::2] xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1) xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1) pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center pts_lvl.append(pts) pts_lvl = torch.stack(pts_lvl, 0) pts_list.append(pts_lvl) return pts_list def _point_target_single(self, flat_proposals, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, stage='init', unmap_outputs=True): inside_flags = valid_flags if not inside_flags.any(): return (None, ) * 7 # assign gt and sample proposals proposals = flat_proposals[inside_flags, :] if stage == 'init': assigner = self.init_assigner pos_weight = self.train_cfg.init.pos_weight else: assigner = self.refine_assigner pos_weight = self.train_cfg.refine.pos_weight assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore, None if self.sampling else gt_labels) sampling_result = self.sampler.sample(assign_result, proposals, gt_bboxes) num_valid_proposals = proposals.shape[0] bbox_gt = proposals.new_zeros([num_valid_proposals, 4]) pos_proposals = torch.zeros_like(proposals) proposals_weights = proposals.new_zeros([num_valid_proposals, 4]) labels = proposals.new_full((num_valid_proposals, ), self.num_classes, dtype=torch.long) label_weights = proposals.new_zeros( num_valid_proposals, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_gt_bboxes = sampling_result.pos_gt_bboxes bbox_gt[pos_inds, :] = pos_gt_bboxes pos_proposals[pos_inds, :] = proposals[pos_inds, :] proposals_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of proposals if unmap_outputs: num_total_proposals = flat_proposals.size(0) labels = unmap(labels, num_total_proposals, inside_flags) label_weights = unmap(label_weights, num_total_proposals, inside_flags) bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags) pos_proposals = unmap(pos_proposals, num_total_proposals, inside_flags) proposals_weights = unmap(proposals_weights, num_total_proposals, inside_flags) return (labels, label_weights, bbox_gt, pos_proposals, proposals_weights, pos_inds, neg_inds) def get_targets(self, proposals_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, stage='init', label_channels=1, unmap_outputs=True): """Compute corresponding GT box and 
classification targets for proposals. Args: proposals_list (list[list]): Multi level points/bboxes of each image. valid_flag_list (list[list]): Multi level valid flags of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_bboxes_list (list[Tensor]): Ground truth labels of each box. stage (str): `init` or `refine`. Generate target for init stage or refine stage label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501 - bbox_gt_list (list[Tensor]): Ground truth bbox of each level. - proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501 - proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501 - num_total_pos (int): Number of positive samples in all images. # noqa: E501 - num_total_neg (int): Number of negative samples in all images. # noqa: E501 """ assert stage in ['init', 'refine'] num_imgs = len(img_metas) assert len(proposals_list) == len(valid_flag_list) == num_imgs # points number of multi levels num_level_proposals = [points.size(0) for points in proposals_list[0]] # concat all level points and flags to a single tensor for i in range(num_imgs): assert len(proposals_list[i]) == len(valid_flag_list[i]) proposals_list[i] = torch.cat(proposals_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_gt, all_proposals, all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply( self._point_target_single, proposals_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, stage=stage, unmap_outputs=unmap_outputs) # no valid points if any([labels is None for labels in all_labels]): return None # sampled points of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) labels_list = images_to_levels(all_labels, num_level_proposals) label_weights_list = images_to_levels(all_label_weights, num_level_proposals) bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals) proposals_list = images_to_levels(all_proposals, num_level_proposals) proposal_weights_list = images_to_levels(all_proposal_weights, num_level_proposals) return (labels_list, label_weights_list, bbox_gt_list, proposals_list, proposal_weights_list, num_total_pos, num_total_neg) def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels, label_weights, bbox_gt_init, bbox_weights_init, bbox_gt_refine, bbox_weights_refine, stride, num_total_samples_init, num_total_samples_refine): # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) cls_score = cls_score.contiguous() loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples_refine) # points loss bbox_gt_init = bbox_gt_init.reshape(-1, 4) bbox_weights_init = bbox_weights_init.reshape(-1, 4) bbox_pred_init = self.points2bbox( pts_pred_init.reshape(-1, 2 
* self.num_points), y_first=False) bbox_gt_refine = bbox_gt_refine.reshape(-1, 4) bbox_weights_refine = bbox_weights_refine.reshape(-1, 4) bbox_pred_refine = self.points2bbox( pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False) normalize_term = self.point_base_scale * stride loss_pts_init = self.loss_bbox_init( bbox_pred_init / normalize_term, bbox_gt_init / normalize_term, bbox_weights_init, avg_factor=num_total_samples_init) loss_pts_refine = self.loss_bbox_refine( bbox_pred_refine / normalize_term, bbox_gt_refine / normalize_term, bbox_weights_refine, avg_factor=num_total_samples_refine) return loss_cls, loss_pts_init, loss_pts_refine def loss(self, cls_scores, pts_preds_init, pts_preds_refine, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 # target for initial stage center_list, valid_flag_list = self.get_points(featmap_sizes, img_metas, device) pts_coordinate_preds_init = self.offset_to_pts(center_list, pts_preds_init) if self.train_cfg.init.assigner['type'] == 'PointAssigner': # Assign target for center list candidate_list = center_list else: # transform center list to bbox list and # assign target for bbox list bbox_list = self.centers_to_bboxes(center_list) candidate_list = bbox_list cls_reg_targets_init = self.get_targets( candidate_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, stage='init', label_channels=label_channels) (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init, num_total_pos_init, num_total_neg_init) = cls_reg_targets_init num_total_samples_init = ( num_total_pos_init + num_total_neg_init if self.sampling else num_total_pos_init) # target for refinement stage center_list, valid_flag_list = self.get_points(featmap_sizes, img_metas, device) pts_coordinate_preds_refine = self.offset_to_pts( center_list, pts_preds_refine) bbox_list = [] for i_img, center in enumerate(center_list): bbox = [] for i_lvl in range(len(pts_preds_refine)): bbox_preds_init = self.points2bbox( pts_preds_init[i_lvl].detach()) bbox_shift = bbox_preds_init * self.point_strides[i_lvl] bbox_center = torch.cat( [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1) bbox.append(bbox_center + bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4)) bbox_list.append(bbox) cls_reg_targets_refine = self.get_targets( bbox_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, stage='refine', label_channels=label_channels) (labels_list, label_weights_list, bbox_gt_list_refine, candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine, num_total_neg_refine) = cls_reg_targets_refine num_total_samples_refine = ( num_total_pos_refine + num_total_neg_refine if self.sampling else num_total_pos_refine) # compute loss losses_cls, losses_pts_init, losses_pts_refine = multi_apply( self.loss_single, cls_scores, pts_coordinate_preds_init, pts_coordinate_preds_refine, labels_list, label_weights_list, bbox_gt_list_init, bbox_weights_list_init, bbox_gt_list_refine, bbox_weights_list_refine, self.point_strides, num_total_samples_init=num_total_samples_init, num_total_samples_refine=num_total_samples_refine) loss_dict_all = { 'loss_cls': losses_cls, 'loss_pts_init': losses_pts_init, 'loss_pts_refine': losses_pts_refine } return loss_dict_all # Same as base_dense_head/_get_bboxes_single except 
self._bbox_decode def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. RepPoints head does not need this value. mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 2). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_score_list) == len(bbox_pred_list) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, priors) in enumerate( zip(cls_score_list, bbox_pred_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1)[:, :-1] # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self._bbox_decode(priors, bbox_pred, self.point_strides[level_idx], img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) return self._bbox_post_process( mlvl_scores, mlvl_labels, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale=rescale, with_nms=with_nms) def _bbox_decode(self, points, bbox_pred, stride, max_shape): bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1) bboxes = bbox_pred * stride + bbox_pos_center x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1]) y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0]) x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1]) y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0]) decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) return decoded_bboxes
34,936
44.669281
101
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/dsla_head.py
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Scale, normal_init from mmcv.runner import force_fp32 import numpy from mmdet.core import distance2bbox, multi_apply, multiclass_nms, reduce_mean, bbox_overlaps from ..builder import HEADS, build_loss from .anchor_free_head import AnchorFreeHead INF = 1e8 @HEADS.register_module() class DSLAHead(AnchorFreeHead): def __init__(self, num_classes, in_channels, regress_ranges=((0, 64), (64, 128), (128, 256), (256, 512), (512, INF)), enable_interval_relaxation=True, interval_relaxation_factor=0.2, enable_centerness_core_zone=True, enable_iou_score_coupling=True, enable_iou_score_only=False, center_sampling=False, center_sample_radius=1.5, norm_on_bbox=False, loss_cls=dict( type='QualityFocalLoss', use_sigmoid=True, beta=2.0, loss_weight=1.0), loss_bbox=dict(type='IoULoss', loss_weight=1.0), norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), **kwargs): self.regress_ranges = regress_ranges self.interval_relaxation_factor = interval_relaxation_factor assert 0.0 <= self.interval_relaxation_factor < 1.0, 'interval_relaxation_factor must be in [0, 1) !!!' assert loss_cls['type'] in ['QualityFocalLoss'], 'cls loss only support "QualityFocalLoss" !!!' self.cls_loss = loss_cls # get gray ranges ---------------------------------------------------- self.interval_relaxation_ranges = [(int(low * (1 - self.interval_relaxation_factor)), int(up * (1 + self.interval_relaxation_factor))) for (low, up) in self.regress_ranges] self.enable_interval_relaxation = enable_interval_relaxation self.enable_iou_score_coupling = enable_iou_score_coupling self.enable_centerness_core_zone = enable_centerness_core_zone self.enable_iou_score_only = enable_iou_score_only self.center_sampling = center_sampling self.center_sample_radius = center_sample_radius self.norm_on_bbox = norm_on_bbox super().__init__( num_classes, in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, norm_cfg=norm_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" super()._init_layers() self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) def init_weights(self): """Initialize weights of the head.""" super().init_weights() def forward(self, feats): """Forward features from the upstream network. Args:scale_relaxation_ranges = [(int(low * (1 - self.scale_relaxation_factor)), int(up * (1 + self.scale_relaxatio feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: cls_scores (list[Tensor]): Box scores for each scale level, \scale_relaxation_ranges = [(int(low * (1 - self.scale_relaxation_factor)), int(up * (1 + self.scale_relaxatio each is a 4D-tensor, the channel number is \ num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each \ scale level, each is a 4D-tensor, the channel number is \ num_points * 4. centernesses (list[Tensor]): Centerss for each scale level, \ each is a 4D-tensor, the channel number is num_points * 1. """ return multi_apply(self.forward_single, feats, self.scales, self.strides) def forward_single(self, x, scale, stride): """Forward features of a single scale levle. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps, only used to normalize the bbox prediction when self.norm_on_bbox is True. 
Returns: tuple: scores for each class, bbox predictions and centerness \ predictions of input feature maps. """ cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() if self.norm_on_bbox: bbox_pred = F.relu(bbox_pred) if not self.training: bbox_pred *= stride else: bbox_pred = bbox_pred.exp() return cls_score, bbox_pred @force_fp32(apply_to=('cls_score_preds', 'bbox_reg_preds')) def loss(self, cls_score_preds, bbox_reg_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_score_preds (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_reg_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_score_preds) == len(bbox_reg_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_score_preds] all_level_points = self.get_points(featmap_sizes, bbox_reg_preds[0].dtype, bbox_reg_preds[0].device) cls_score_targets, bbox_reg_targets = self.get_targets(all_level_points, gt_bboxes, gt_labels) num_imgs = cls_score_preds[0].size(0) # flatten preds flatten_cls_score_preds = [cls_score_pred.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score_pred in cls_score_preds] flatten_bbox_reg_preds = [bbox_reg_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_reg_pred in bbox_reg_preds] flatten_cls_score_preds = torch.cat(flatten_cls_score_preds) flatten_bbox_reg_preds = torch.cat(flatten_bbox_reg_preds) flatten_cls_score_targets = torch.cat(cls_score_targets) flatten_bbox_reg_targets = torch.cat(bbox_reg_targets) # repeat points to align with bbox_preds flatten_points = torch.cat([points.repeat(num_imgs, 1) for points in all_level_points]) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes # 此时labels是PxC的,C为类别数,首先需要获取每个point的最大值 max_cls_score_targets, max_cls_score_indexes = flatten_cls_score_targets.max(dim=-1) pos_indexes = (max_cls_score_targets > 0).nonzero().reshape(-1) pos_bbox_reg_preds = flatten_bbox_reg_preds[pos_indexes] iou_score_targets = flatten_cls_score_preds.new_zeros(max_cls_score_targets.shape) if len(pos_indexes) > 0: pos_bbox_reg_targets = flatten_bbox_reg_targets[pos_indexes] pos_points = flatten_points[pos_indexes] pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_reg_preds) pos_decoded_bbox_targets = distance2bbox(pos_points, pos_bbox_reg_targets) # bbox_reg_weights = flatten_cls_score_preds.detach().sigmoid() # bbox_reg_weights = bbox_reg_weights[pos_indexes][range(len(pos_indexes)), max_cls_score_indexes[pos_indexes]] # bbox_reg_weights_denorm = max(reduce_mean(bbox_reg_weights.sum()), 1.0) bbox_reg_weights = max_cls_score_targets[pos_indexes] bbox_reg_weights_denorm = max(reduce_mean(bbox_reg_weights.sum()), 1.0) loss_bbox = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_bbox_targets, weight=bbox_reg_weights, avg_factor=bbox_reg_weights_denorm) 
iou_score_targets[pos_indexes] = bbox_overlaps(pos_decoded_bbox_preds.detach(), pos_decoded_bbox_targets, is_aligned=True) else: loss_bbox = pos_bbox_reg_preds.sum() if self.enable_iou_score_coupling: max_cls_coupled_score_targets = iou_score_targets * max_cls_score_targets else: max_cls_coupled_score_targets = max_cls_score_targets if self.enable_iou_score_only: max_cls_coupled_score_targets = iou_score_targets label_targets = max_cls_score_indexes * (max_cls_coupled_score_targets > 0) + self.num_classes * (max_cls_coupled_score_targets <= 0) cls_weights_denorm = max(reduce_mean(max_cls_coupled_score_targets.sum()), 1.0) loss_cls = self.loss_cls(flatten_cls_score_preds, [label_targets, max_cls_coupled_score_targets], avg_factor=cls_weights_denorm) return dict( loss_cls=loss_cls, loss_bbox=loss_bbox, ) def _get_points_single(self, featmap_size, stride, dtype, device, flatten=False): """Get points according to feature map sizes.""" y, x = super()._get_points_single(featmap_size, stride, dtype, device) points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), dim=-1) # + stride // 2 return points def get_targets(self, points, gt_bboxes_list, gt_labels_list): """Compute regression, classification and centerss targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels_list (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). Returns: tuple: concat_lvl_labels (list[Tensor]): Labels of each level. \ concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ level. """ assert len(points) == len(self.regress_ranges) num_levels = len(points) # expand regress ranges to align with points expanded_regress_ranges = [points[i].new_tensor(self.regress_ranges[i])[None].expand_as(points[i]) for i in range(num_levels)] # expand gray ranges expanded_scale_relaxation_ranges = [points[i].new_tensor(self.interval_relaxation_ranges[i])[None].expand_as(points[i]) for i in range(num_levels)] # expand stride expanded_strides_list = [points[i].new_tensor(self.strides[i]).expand(points[i].size(0)) for i in range(num_levels)] # concat all levels points and regress ranges concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_scale_relaxation_ranges = torch.cat(expanded_scale_relaxation_ranges, dim=0) concat_points = torch.cat(points, dim=0) concat_strides = torch.cat(expanded_strides_list, dim=0) # the number of points per img, per lvl num_points = [center.size(0) for center in points] # get labels and bbox_targets of each image labels_list, bbox_targets_list = multi_apply( self._get_target_single, gt_bboxes_list, gt_labels_list, points=concat_points, regress_ranges=concat_regress_ranges, interval_relaxation_ranges=concat_scale_relaxation_ranges, num_points_per_lvl=num_points, strides=concat_strides) # split to per img, per level labels_list = [labels.split(num_points, 0) for labels in labels_list] bbox_targets_list = [bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list] # concat per level image concat_lvl_labels = [] concat_lvl_bbox_targets = [] for i in range(num_levels): concat_lvl_labels.append(torch.cat([labels[i] for labels in labels_list])) bbox_targets = torch.cat([bbox_targets[i] for bbox_targets in bbox_targets_list]) if self.norm_on_bbox: bbox_targets = bbox_targets / self.strides[i] concat_lvl_bbox_targets.append(bbox_targets) return concat_lvl_labels, 
concat_lvl_bbox_targets def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges, interval_relaxation_ranges, num_points_per_lvl, strides): """Compute regression and classification targets for a single image.""" num_points = points.size(0) num_gts = gt_labels.size(0) classification_targets = gt_bboxes.new_zeros((num_points, self.num_classes)) regression_targets = gt_bboxes.new_zeros((num_points, 4)) if num_gts == 0: return classification_targets, regression_targets regress_ranges = regress_ranges[:, None, :].expand(num_points, num_gts, 2) interval_relaxation_ranges = interval_relaxation_ranges[:, None, :].expand(num_points, num_gts, 2) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) gt_labels = gt_labels[None].expand(num_points, num_gts) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None].expand(num_points, num_gts) ys = ys[:, None].expand(num_points, num_gts) left = xs - gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) if self.center_sampling: # condition1: inside a `center bbox` radius = self.center_sample_radius center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2 center_gts = torch.zeros_like(gt_bboxes) stride = center_xs.new_zeros(center_xs.shape) # project the points on current lvl back to the `original` sizes lvl_begin = 0 for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): lvl_end = lvl_begin + num_points_lvl stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius lvl_begin = lvl_end x_mins = center_xs - stride y_mins = center_ys - stride x_maxs = center_xs + stride y_maxs = center_ys + stride center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0], x_mins, gt_bboxes[..., 0]) center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1], y_mins, gt_bboxes[..., 1]) center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2], gt_bboxes[..., 2], x_maxs) center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3], gt_bboxes[..., 3], y_maxs) cb_dist_left = xs - center_gts[..., 0] cb_dist_right = center_gts[..., 2] - xs cb_dist_top = ys - center_gts[..., 1] cb_dist_bottom = center_gts[..., 3] - ys center_bbox = torch.stack( (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 else: # condition1: inside a gt bbox inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 # centerness score filtered_bbox_targets = bbox_targets * inside_gt_bbox_mask[..., None].expand((num_points, num_gts, 4)) # filter out points that are not inside the bbox point_centerness_scores = self.centerness_score(filtered_bbox_targets) # PxN if self.enable_centerness_core_zone: gt_bboxes_center_x = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 gt_bboxes_center_y = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2 strides = strides[..., None].expand((num_points, num_gts)) core_zone_left = gt_bboxes_center_x - strides / 2 core_zone_right = gt_bboxes_center_x + strides / 2 core_zone_top = gt_bboxes_center_y - strides / 2 core_zone_bottom = gt_bboxes_center_y + strides / 2 inside_core_zone = (xs >= core_zone_left) & (xs <= core_zone_right) & (ys >= core_zone_top) & (ys <= core_zone_bottom) inside_core_zone = inside_core_zone & inside_gt_bbox_mask # in case the point is outside the bbox point_centerness_scores = point_centerness_scores * (~inside_core_zone) + inside_core_zone # scale multiplier assign_measure = bbox_targets.max(-1)[0] if self.enable_interval_relaxation: scale_scores =
self.interval_relaxation_score(assign_measure, regress_ranges, interval_relaxation_ranges) else: scale_scores = (regress_ranges[..., 0] <= assign_measure) & (assign_measure <= regress_ranges[..., 1]) final_scores = point_centerness_scores * scale_scores positive_condition = final_scores > 0.0 # sort scores sorted_final_scores, sorted_indexes = final_scores.sort(dim=1) intermediate_indexes = sorted_indexes.new_tensor(range(sorted_indexes.size(0)))[..., None].expand(sorted_indexes.size(0), sorted_indexes.size(1)) # reranking sorted_gt_labels = gt_labels[intermediate_indexes, sorted_indexes] sorted_positive_condition = positive_condition[intermediate_indexes, sorted_indexes] indexes_1, indexes_2 = torch.where(sorted_positive_condition) positive_label_indexes = sorted_gt_labels[indexes_1, indexes_2] classification_targets[indexes_1, positive_label_indexes] = sorted_final_scores[indexes_1, indexes_2] # if there are more than one objects for a location, # we choose the one with the highest score _, select_indexes = sorted_final_scores.max(dim=1) sorted_bbox_targets = bbox_targets[intermediate_indexes, sorted_indexes] regression_targets = sorted_bbox_targets[range(num_points), select_indexes] return classification_targets, regression_targets def centerness_score(self, bbox_targets): """Compute centerness targets. Args: bbox_targets (Tensor): BBox targets of all bboxes in shape (num_pos, num_bbox, 4) Returns: Tensor: Centerness target. """ # only calculate pos centerness targets, otherwise there may be nan left_right = bbox_targets[..., [0, 2]] top_bottom = bbox_targets[..., [1, 3]] # clamp to avoid zero-divisor centerness_targets = ((left_right.min(dim=-1)[0]).clamp(min=0.0) / (left_right.max(dim=-1)[0]).clamp(min=0.01)) * \ ((top_bottom.min(dim=-1)[0]).clamp(min=0.0) / (top_bottom.max(dim=-1)[0]).clamp(min=0.01)) return torch.sqrt(centerness_targets) def interval_relaxation_score(self, measure, regress_ranges, gray_ranges): # linear left_gray_multiplier = (measure - gray_ranges[..., 0]) / (regress_ranges[..., 0] - gray_ranges[..., 0]).clamp(min=0.01) left_gray_indicator = (gray_ranges[..., 0] <= measure) & (measure < regress_ranges[..., 0]) green_indicator = (regress_ranges[..., 0] <= measure) & (measure <= regress_ranges[..., 1]) right_gray_multiplier = (gray_ranges[..., 1] - measure) / (gray_ranges[..., 1] - regress_ranges[..., 1]).clamp(min=0.01) right_gray_indicator = (regress_ranges[..., 1] < measure) & (measure <= gray_ranges[..., 1]) relaxation_score = left_gray_multiplier * left_gray_indicator + green_indicator + right_gray_multiplier * right_gray_indicator return relaxation_score @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg=None, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. Args: cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_points * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_points * 4, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. cfg (mmcv.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. 
The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is a (n,) tensor where each item is the predicted class label of the corresponding box. """ assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [cls_scores[i][img_id].detach() for i in range(num_levels)] bbox_pred_list = [bbox_preds[i][img_id].detach() for i in range(num_levels)] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] det_bboxes = self._get_bboxes_single( cls_score_list, bbox_pred_list, mlvl_points, img_shape, scale_factor, cfg, rescale, with_nms) result_list.append(det_bboxes) return result_list def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_points, img_shape, scale_factor, cfg, rescale=False, with_nms=True): """Transform outputs for a single batch item into bbox predictions. Args: cls_scores (list[Tensor]): Box scores for a single scale level with shape (num_points * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for a single scale level with shape (num_points * 4, H, W). mlvl_points (list[Tensor]): Box reference for a single scale level with shape (num_total_points, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arrange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple(Tensor): det_bboxes (Tensor): BBox predictions in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. det_labels (Tensor): A (n,) tensor where each item is the predicted class label of the corresponding box. 
""" cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(bbox_preds) == len(mlvl_points) mlvl_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred, points in zip(cls_scores, bbox_preds, mlvl_points): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels).sigmoid() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) nms_pre = cfg.get('nms_pre', -1) if 0 < nms_pre < scores.shape[0]: max_scores, _ = scores.max(dim=1) _, topk_inds = max_scores.topk(nms_pre) points = points[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) # Set max number of box to be feed into nms in deployment deploy_nms_pre = cfg.get('deploy_nms_pre', -1) if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export(): max_scores, _ = mlvl_scores.max(dim=1) _, topk_inds = max_scores.topk(deploy_nms_pre) mlvl_scores = mlvl_scores[topk_inds, :] mlvl_bboxes = mlvl_bboxes[topk_inds, :] padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) if with_nms: det_bboxes, det_labels = multiclass_nms( mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=None) return det_bboxes, det_labels else: return mlvl_bboxes, mlvl_scores
27,448
46.001712
186
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/cascade_rpn_head.py
# Copyright (c) OpenMMLab. All rights reserved. from __future__ import division import copy import warnings import torch import torch.nn as nn from mmcv import ConfigDict from mmcv.ops import DeformConv2d, batched_nms from mmcv.runner import BaseModule, ModuleList from mmdet.core import (RegionAssigner, build_assigner, build_sampler, images_to_levels, multi_apply) from mmdet.core.utils import select_single_mlvl from ..builder import HEADS, build_head from .base_dense_head import BaseDenseHead from .rpn_head import RPNHead class AdaptiveConv(BaseModule): """AdaptiveConv used to adapt the sampling location with the anchors. Args: in_channels (int): Number of channels in the input image out_channels (int): Number of channels produced by the convolution kernel_size (int or tuple): Size of the conv kernel. Default: 3 stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 1 dilation (int or tuple, optional): Spacing between kernel elements. Default: 3 groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 bias (bool, optional): If set True, adds a learnable bias to the output. Default: False. type (str, optional): Type of adaptive conv, can be either 'offset' (arbitrary anchors) or 'dilation' (uniform anchor). Default: 'dilation'. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=3, groups=1, bias=False, type='dilation', init_cfg=dict( type='Normal', std=0.01, override=dict(name='conv'))): super(AdaptiveConv, self).__init__(init_cfg) assert type in ['offset', 'dilation'] self.adapt_type = type assert kernel_size == 3, 'Adaptive conv only supports kernels 3' if self.adapt_type == 'offset': assert stride == 1 and padding == 1 and groups == 1, \ 'Adaptive conv offset mode only supports padding: {1}, ' \ f'stride: {1}, groups: {1}' self.conv = DeformConv2d( in_channels, out_channels, kernel_size, padding=padding, stride=stride, groups=groups, bias=bias) else: self.conv = nn.Conv2d( in_channels, out_channels, kernel_size, padding=dilation, dilation=dilation) def forward(self, x, offset): """Forward function.""" if self.adapt_type == 'offset': N, _, H, W = x.shape assert offset is not None assert H * W == offset.shape[1] # reshape [N, NA, 18] to (N, 18, H, W) offset = offset.permute(0, 2, 1).reshape(N, -1, H, W) offset = offset.contiguous() x = self.conv(x, offset) else: assert offset is None x = self.conv(x) return x @HEADS.register_module() class StageCascadeRPNHead(RPNHead): """Stage of CascadeRPNHead. Args: in_channels (int): Number of channels in the input feature map. anchor_generator (dict): anchor generator config. adapt_cfg (dict): adaptation config. bridged_feature (bool, optional): whether update rpn feature. Default: False. with_cls (bool, optional): whether use classification branch. Default: True. sampling (bool, optional): whether use sampling. Default: True. init_cfg (dict or list[dict], optional): Initialization config dict. 
Default: None """ def __init__(self, in_channels, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[1.0], strides=[4, 8, 16, 32, 64]), adapt_cfg=dict(type='dilation', dilation=3), bridged_feature=False, with_cls=True, sampling=True, init_cfg=None, **kwargs): self.with_cls = with_cls self.anchor_strides = anchor_generator['strides'] self.anchor_scales = anchor_generator['scales'] self.bridged_feature = bridged_feature self.adapt_cfg = adapt_cfg super(StageCascadeRPNHead, self).__init__( in_channels, anchor_generator=anchor_generator, init_cfg=init_cfg, **kwargs) # override sampling and sampler self.sampling = sampling if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) if init_cfg is None: self.init_cfg = dict( type='Normal', std=0.01, override=[dict(name='rpn_reg')]) if self.with_cls: self.init_cfg['override'].append(dict(name='rpn_cls')) def _init_layers(self): """Init layers of a CascadeRPN stage.""" self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels, **self.adapt_cfg) if self.with_cls: self.rpn_cls = nn.Conv2d(self.feat_channels, self.num_anchors * self.cls_out_channels, 1) self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) self.relu = nn.ReLU(inplace=True) def forward_single(self, x, offset): """Forward function of single scale.""" bridged_x = x x = self.relu(self.rpn_conv(x, offset)) if self.bridged_feature: bridged_x = x # update feature cls_score = self.rpn_cls(x) if self.with_cls else None bbox_pred = self.rpn_reg(x) return bridged_x, cls_score, bbox_pred def forward(self, feats, offset_list=None): """Forward function.""" if offset_list is None: offset_list = [None for _ in range(len(feats))] return multi_apply(self.forward_single, feats, offset_list) def _region_targets_single(self, anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, featmap_sizes, label_channels=1): """Get anchor targets based on region for single level.""" assign_result = self.assigner.assign( anchors, valid_flags, gt_bboxes, img_meta, featmap_sizes, self.anchor_scales[0], self.anchor_strides, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=None, allowed_border=self.train_cfg.allowed_border) flat_anchors = torch.cat(anchors) sampling_result = self.sampler.sample(assign_result, flat_anchors, gt_bboxes) num_anchors = flat_anchors.shape[0] bbox_targets = torch.zeros_like(flat_anchors) bbox_weights = torch.zeros_like(flat_anchors) labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long) label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) else: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: labels[pos_inds] = 1 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def 
region_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, featmap_sizes, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """See :func:`StageCascadeRPNHead.get_targets`.""" num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._region_targets_single, anchor_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, featmap_sizes=featmap_sizes, label_channels=label_channels) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def get_targets(self, anchor_list, valid_flag_list, gt_bboxes, img_metas, featmap_sizes, gt_bboxes_ignore=None, label_channels=1): """Compute regression and classification targets for anchors. Args: anchor_list (list[list]): Multi level anchors of each image. valid_flag_list (list[list]): Multi level valid flags of each image. gt_bboxes (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. featmap_sizes (list[Tensor]): Feature mapsize each level gt_bboxes_ignore (list[Tensor]): Ignore bboxes of each images label_channels (int): Channel of label. Returns: cls_reg_targets (tuple) """ if isinstance(self.assigner, RegionAssigner): cls_reg_targets = self.region_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, featmap_sizes, gt_bboxes_ignore_list=gt_bboxes_ignore, label_channels=label_channels) else: cls_reg_targets = super(StageCascadeRPNHead, self).get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, label_channels=label_channels) return cls_reg_targets def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes): """ Get offset for deformable conv based on anchor shape NOTE: currently support deformable kernel_size=3 and dilation=1 Args: anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of multi-level anchors anchor_strides (list[int]): anchor stride of each level Returns: offset_list (list[tensor]): [NLVL, NA, 2, 18]: offset of DeformConv kernel. 
""" def _shape_offset(anchors, stride, ks=3, dilation=1): # currently support kernel_size=3 and dilation=1 assert ks == 3 and dilation == 1 pad = (ks - 1) // 2 idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device) yy, xx = torch.meshgrid(idx, idx) # return order matters xx = xx.reshape(-1) yy = yy.reshape(-1) w = (anchors[:, 2] - anchors[:, 0]) / stride h = (anchors[:, 3] - anchors[:, 1]) / stride w = w / (ks - 1) - dilation h = h / (ks - 1) - dilation offset_x = w[:, None] * xx # (NA, ks**2) offset_y = h[:, None] * yy # (NA, ks**2) return offset_x, offset_y def _ctr_offset(anchors, stride, featmap_size): feat_h, feat_w = featmap_size assert len(anchors) == feat_h * feat_w x = (anchors[:, 0] + anchors[:, 2]) * 0.5 y = (anchors[:, 1] + anchors[:, 3]) * 0.5 # compute centers on feature map x = x / stride y = y / stride # compute predefine centers xx = torch.arange(0, feat_w, device=anchors.device) yy = torch.arange(0, feat_h, device=anchors.device) yy, xx = torch.meshgrid(yy, xx) xx = xx.reshape(-1).type_as(x) yy = yy.reshape(-1).type_as(y) offset_x = x - xx # (NA, ) offset_y = y - yy # (NA, ) return offset_x, offset_y num_imgs = len(anchor_list) num_lvls = len(anchor_list[0]) dtype = anchor_list[0][0].dtype device = anchor_list[0][0].device num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] offset_list = [] for i in range(num_imgs): mlvl_offset = [] for lvl in range(num_lvls): c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl], anchor_strides[lvl], featmap_sizes[lvl]) s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl], anchor_strides[lvl]) # offset = ctr_offset + shape_offset offset_x = s_offset_x + c_offset_x[:, None] offset_y = s_offset_y + c_offset_y[:, None] # offset order (y0, x0, y1, x2, .., y8, x8, y9, x9) offset = torch.stack([offset_y, offset_x], dim=-1) offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2] mlvl_offset.append(offset) offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2] offset_list = images_to_levels(offset_list, num_level_anchors) return offset_list def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, bbox_targets, bbox_weights, num_total_samples): """Loss function on single scale.""" # classification loss if self.with_cls: labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) # regression loss bbox_targets = bbox_targets.reshape(-1, 4) bbox_weights = bbox_weights.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. anchors = anchors.reshape(-1, 4) bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) loss_reg = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples) if self.with_cls: return loss_cls, loss_reg return None, loss_reg def loss(self, anchor_list, valid_flag_list, cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: anchor_list (list[list]): Multi level anchors of each image. 
cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Default: None Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds] label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, featmap_sizes, gt_bboxes_ignore=gt_bboxes_ignore, label_channels=label_channels) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets if self.sampling: num_total_samples = num_total_pos + num_total_neg else: # 200 is hard-coded average factor, # which follows guided anchoring. num_total_samples = sum([label.numel() for label in labels_list]) / 200.0 # change per image, per level anchor_list to per_level, per_image mlvl_anchor_list = list(zip(*anchor_list)) # concat mlvl_anchor_list mlvl_anchor_list = [ torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list ] losses = multi_apply( self.loss_single, cls_scores, bbox_preds, mlvl_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) if self.with_cls: return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1]) return dict(loss_rpn_reg=losses[1]) def get_bboxes(self, anchor_list, cls_scores, bbox_preds, img_metas, cfg, rescale=False): """Get proposal predict. Args: anchor_list (list[list]): Multi level anchors of each image. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). img_metas (list[dict], Optional): Image meta info. Default None. cfg (mmcv.Config, Optional): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. Returns: Tensor: Labeled boxes in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. """ assert len(cls_scores) == len(bbox_preds) result_list = [] for img_id in range(len(img_metas)): cls_score_list = select_single_mlvl(cls_scores, img_id) bbox_pred_list = select_single_mlvl(bbox_preds, img_id) img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, anchor_list[img_id], img_shape, scale_factor, cfg, rescale) result_list.append(proposals) return result_list def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): """Transform outputs of a single image into bbox predictions. Args: cls_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_anchors * num_classes, H, W). 
bbox_preds (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_anchors * 4, H, W). mlvl_anchors (list[Tensor]): Box reference from all scale levels of a single image, each item has shape (num_total_anchors, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default False. Returns: Tensor: Labeled boxes in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. """ cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) # bboxes from different level should be independent during NMS, # level_ids are used as labels for batched NMS to separate them level_ids = [] mlvl_scores = [] mlvl_bbox_preds = [] mlvl_valid_anchors = [] nms_pre = cfg.get('nms_pre', -1) for idx in range(len(cls_scores)): rpn_cls_score = cls_scores[idx] rpn_bbox_pred = bbox_preds[idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) # We set FG labels to [0, num_class-1] and BG label to # num_class in RPN head since mmdet v2.5, which is unified to # be consistent with other head since mmdet v2.0. In mmdet v2.0 # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. scores = rpn_cls_score.softmax(dim=1)[:, 0] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) anchors = mlvl_anchors[idx] if 0 < nms_pre < scores.shape[0]: # sort is faster than topk # _, topk_inds = scores.topk(cfg.nms_pre) ranked_scores, rank_inds = scores.sort(descending=True) topk_inds = rank_inds[:nms_pre] scores = ranked_scores[:nms_pre] rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] mlvl_scores.append(scores) mlvl_bbox_preds.append(rpn_bbox_pred) mlvl_valid_anchors.append(anchors) level_ids.append( scores.new_full((scores.size(0), ), idx, dtype=torch.long)) scores = torch.cat(mlvl_scores) anchors = torch.cat(mlvl_valid_anchors) rpn_bbox_pred = torch.cat(mlvl_bbox_preds) proposals = self.bbox_coder.decode( anchors, rpn_bbox_pred, max_shape=img_shape) ids = torch.cat(level_ids) if cfg.min_bbox_size >= 0: w = proposals[:, 2] - proposals[:, 0] h = proposals[:, 3] - proposals[:, 1] valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) if not valid_mask.all(): proposals = proposals[valid_mask] scores = scores[valid_mask] ids = ids[valid_mask] # deprecate arguments warning if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: warnings.warn( 'In rpn_proposal or test_cfg, ' 'nms_thr has been moved to a dict named nms as ' 'iou_threshold, max_num has been renamed as max_per_img, ' 'name of original arguments and the way to specify ' 'iou_threshold of NMS will be deprecated.') if 'nms' not in cfg: cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) if 'max_num' in cfg: if 'max_per_img' in cfg: assert cfg.max_num == cfg.max_per_img, f'You ' \ f'set max_num and ' \ f'max_per_img at the same time, but get {cfg.max_num} ' \ f'and {cfg.max_per_img} respectively' \ 'Please delete max_num which will be deprecated.' 
else: cfg.max_per_img = cfg.max_num if 'nms_thr' in cfg: assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \ f' iou_threshold in nms and ' \ f'nms_thr at the same time, but get' \ f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \ f' respectively. Please delete the nms_thr ' \ f'which will be deprecated.' if proposals.numel() > 0: dets, _ = batched_nms(proposals, scores, ids, cfg.nms) else: return proposals.new_zeros(0, 5) return dets[:cfg.max_per_img] def refine_bboxes(self, anchor_list, bbox_preds, img_metas): """Refine bboxes through stages.""" num_levels = len(bbox_preds) new_anchor_list = [] for img_id in range(len(img_metas)): mlvl_anchors = [] for i in range(num_levels): bbox_pred = bbox_preds[i][img_id].detach() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) img_shape = img_metas[img_id]['img_shape'] bboxes = self.bbox_coder.decode(anchor_list[img_id][i], bbox_pred, img_shape) mlvl_anchors.append(bboxes) new_anchor_list.append(mlvl_anchors) return new_anchor_list @HEADS.register_module() class CascadeRPNHead(BaseDenseHead): """The CascadeRPNHead will predict more accurate region proposals, which is required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN consists of a sequence of RPNStage to progressively improve the accuracy of the detected proposals. More details can be found in ``https://arxiv.org/abs/1909.06720``. Args: num_stages (int): number of CascadeRPN stages. stages (list[dict]): list of configs to build the stages. train_cfg (list[dict]): list of configs at training time each stage. test_cfg (dict): config at testing time. """ def __init__(self, num_stages, stages, train_cfg, test_cfg, init_cfg=None): super(CascadeRPNHead, self).__init__(init_cfg) assert num_stages == len(stages) self.num_stages = num_stages # Be careful! 
Pretrained weights cannot be loaded when use # nn.ModuleList self.stages = ModuleList() for i in range(len(stages)): train_cfg_i = train_cfg[i] if train_cfg is not None else None stages[i].update(train_cfg=train_cfg_i) stages[i].update(test_cfg=test_cfg) self.stages.append(build_head(stages[i])) self.train_cfg = train_cfg self.test_cfg = test_cfg def loss(self): """loss() is implemented in StageCascadeRPNHead.""" pass def get_bboxes(self): """get_bboxes() is implemented in StageCascadeRPNHead.""" pass def forward_train(self, x, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=None, proposal_cfg=None): """Forward train function.""" assert gt_labels is None, 'RPN does not require gt_labels' featmap_sizes = [featmap.size()[-2:] for featmap in x] device = x[0].device anchor_list, valid_flag_list = self.stages[0].get_anchors( featmap_sizes, img_metas, device=device) losses = dict() for i in range(self.num_stages): stage = self.stages[i] if stage.adapt_cfg['type'] == 'offset': offset_list = stage.anchor_offset(anchor_list, stage.anchor_strides, featmap_sizes) else: offset_list = None x, cls_score, bbox_pred = stage(x, offset_list) rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, bbox_pred, gt_bboxes, img_metas) stage_loss = stage.loss(*rpn_loss_inputs) for name, value in stage_loss.items(): losses['s{}.{}'.format(i, name)] = value # refine boxes if i < self.num_stages - 1: anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, img_metas) if proposal_cfg is None: return losses else: proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score, bbox_pred, img_metas, self.test_cfg) return losses, proposal_list def simple_test_rpn(self, x, img_metas): """Simple forward test function.""" featmap_sizes = [featmap.size()[-2:] for featmap in x] device = x[0].device anchor_list, _ = self.stages[0].get_anchors( featmap_sizes, img_metas, device=device) for i in range(self.num_stages): stage = self.stages[i] if stage.adapt_cfg['type'] == 'offset': offset_list = stage.anchor_offset(anchor_list, stage.anchor_strides, featmap_sizes) else: offset_list = None x, cls_score, bbox_pred = stage(x, offset_list) if i < self.num_stages - 1: anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, img_metas) proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score, bbox_pred, img_metas, self.test_cfg) return proposal_list def aug_test_rpn(self, x, img_metas): """Augmented forward test function.""" raise NotImplementedError( 'CascadeRPNHead does not support test-time augmentation')
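# A small self-contained sketch (toy values assumed; `anchor_to_offset` is not a repo API)
# of the offset construction performed by StageCascadeRPNHead.anchor_offset in the 'offset'
# adapt mode: a centre offset that shifts the 3x3 deformable kernel onto the anchor centre,
# plus a shape offset that stretches the kernel over the anchor extent.
import torch

def anchor_to_offset(anchor, stride, feat_xy):
    # anchor: (x1, y1, x2, y2) in image coords; feat_xy: (x, y) index of the feature cell
    x1, y1, x2, y2 = anchor
    ctr_dx = (x1 + x2) * 0.5 / stride - feat_xy[0]
    ctr_dy = (y1 + y2) * 0.5 / stride - feat_xy[1]
    w = (x2 - x1) / stride
    h = (y2 - y1) / stride
    idx = torch.tensor([-1., 0., 1.])
    yy, xx = torch.meshgrid(idx, idx)               # 3x3 base sampling grid
    # scale the grid to the anchor size, relative to the default dilation-1 kernel
    off_x = (w / 2 - 1) * xx.reshape(-1) + ctr_dx
    off_y = (h / 2 - 1) * yy.reshape(-1) + ctr_dy
    # interleave as (y0, x0, ..., y8, x8), the (y, x) order used by the head
    return torch.stack([off_y, off_x], dim=-1).reshape(-1)

# a 64x64 anchor centred on feature cell (8, 8) at stride 8 needs no centre shift,
# only a shape offset spreading the nine samples +-3 cells around the centre
print(anchor_to_offset((32., 32., 96., 96.), stride=8, feat_xy=(8., 8.)))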
33,996
41.390274
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/vfnet_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmcv.ops import DeformConv2d from mmcv.runner import force_fp32 from mmdet.core import (MlvlPointGenerator, bbox_overlaps, build_assigner, build_prior_generator, build_sampler, multi_apply, reduce_mean) from ..builder import HEADS, build_loss from .atss_head import ATSSHead from .fcos_head import FCOSHead INF = 1e8 @HEADS.register_module() class VFNetHead(ATSSHead, FCOSHead): """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object Detector.<https://arxiv.org/abs/2008.13367>`_. The VFNet predicts IoU-aware classification scores which mix the object presence confidence and object localization accuracy as the detection score. It is built on the FCOS architecture and uses ATSS for defining positive/negative training examples. The VFNet is trained with Varifocal Loss and empolys star-shaped deformable convolution to extract features for a bbox. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. regress_ranges (tuple[tuple[int, int]]): Regress range of multiple level points. center_sampling (bool): If true, use center sampling. Default: False. center_sample_radius (float): Radius of center sampling. Default: 1.5. sync_num_pos (bool): If true, synchronize the number of positive examples across GPUs. Default: True gradient_mul (float): The multiplier to gradients from bbox refinement and recognition. Default: 0.1. bbox_norm_type (str): The bbox normalization type, 'reg_denom' or 'stride'. Default: reg_denom loss_cls_fl (dict): Config of focal loss. use_vfl (bool): If true, use varifocal loss for training. Default: True. loss_cls (dict): Config of varifocal loss. loss_bbox (dict): Config of localization loss, GIoU Loss. loss_bbox (dict): Config of localization refinement loss, GIoU Loss. norm_cfg (dict): dictionary to construct and config norm layer. Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). use_atss (bool): If true, use ATSS to define positive/negative examples. Default: True. anchor_generator (dict): Config of anchor generator for ATSS. init_cfg (dict or list[dict], optional): Initialization config dict. 
Example: >>> self = VFNetHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats) >>> assert len(cls_score) == len(self.scales) """ # noqa: E501 def __init__(self, num_classes, in_channels, regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), (512, INF)), center_sampling=False, center_sample_radius=1.5, sync_num_pos=True, gradient_mul=0.1, bbox_norm_type='reg_denom', loss_cls_fl=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), use_vfl=True, loss_cls=dict( type='VarifocalLoss', use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.5), loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0), norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), use_atss=True, reg_decoded_bbox=True, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, center_offset=0.0, strides=[8, 16, 32, 64, 128]), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='vfnet_cls', std=0.01, bias_prob=0.01)), **kwargs): # dcn base offsets, adapted from reppoints_head.py self.num_dconv_points = 9 self.dcn_kernel = int(np.sqrt(self.num_dconv_points)) self.dcn_pad = int((self.dcn_kernel - 1) / 2) dcn_base = np.arange(-self.dcn_pad, self.dcn_pad + 1).astype(np.float64) dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) dcn_base_x = np.tile(dcn_base, self.dcn_kernel) dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( (-1)) self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) super(FCOSHead, self).__init__( num_classes, in_channels, norm_cfg=norm_cfg, init_cfg=init_cfg, **kwargs) self.regress_ranges = regress_ranges self.reg_denoms = [ regress_range[-1] for regress_range in regress_ranges ] self.reg_denoms[-1] = self.reg_denoms[-2] * 2 self.center_sampling = center_sampling self.center_sample_radius = center_sample_radius self.sync_num_pos = sync_num_pos self.bbox_norm_type = bbox_norm_type self.gradient_mul = gradient_mul self.use_vfl = use_vfl if self.use_vfl: self.loss_cls = build_loss(loss_cls) else: self.loss_cls = build_loss(loss_cls_fl) self.loss_bbox = build_loss(loss_bbox) self.loss_bbox_refine = build_loss(loss_bbox_refine) # for getting ATSS targets self.use_atss = use_atss self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.anchor_center_offset = anchor_generator['center_offset'] self.num_base_priors = self.prior_generator.num_base_priors[0] self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) # only be used in `get_atss_targets` when `use_atss` is True self.atss_prior_generator = build_prior_generator(anchor_generator) self.fcos_prior_generator = MlvlPointGenerator( anchor_generator['strides'], self.anchor_center_offset if self.use_atss else 0.5) # In order to reuse the `get_bboxes` in `BaseDenseHead. # Only be used in testing phase. self.prior_generator = self.fcos_prior_generator @property def num_anchors(self): """ Returns: int: Number of anchors on each point of feature map. 
""" warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.num_base_priors @property def anchor_generator(self): warnings.warn('DeprecationWarning: anchor_generator is deprecated, ' 'please use "atss_prior_generator" instead') return self.prior_generator def _init_layers(self): """Initialize layers of the head.""" super(FCOSHead, self)._init_cls_convs() super(FCOSHead, self)._init_reg_convs() self.relu = nn.ReLU(inplace=True) self.vfnet_reg_conv = ConvModule( self.feat_channels, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias) self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) self.vfnet_reg_refine_dconv = DeformConv2d( self.feat_channels, self.feat_channels, self.dcn_kernel, 1, padding=self.dcn_pad) self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1) self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides]) self.vfnet_cls_dconv = DeformConv2d( self.feat_channels, self.feat_channels, self.dcn_kernel, 1, padding=self.dcn_pad) self.vfnet_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: cls_scores (list[Tensor]): Box iou-aware scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. bbox_preds_refine (list[Tensor]): Refined Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. """ return multi_apply(self.forward_single, feats, self.scales, self.scales_refine, self.strides, self.reg_denoms) def forward_single(self, x, scale, scale_refine, stride, reg_denom): """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the refined bbox prediction. stride (int): The corresponding stride for feature maps, used to normalize the bbox prediction when bbox_norm_type = 'stride'. reg_denom (int): The corresponding regression range for feature maps, only used to normalize the bbox prediction when bbox_norm_type = 'reg_denom'. Returns: tuple: iou-aware cls scores for each box, bbox predictions and refined bbox predictions of input feature maps. 
""" cls_feat = x reg_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) # predict the bbox_pred of different level reg_feat_init = self.vfnet_reg_conv(reg_feat) if self.bbox_norm_type == 'reg_denom': bbox_pred = scale( self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom elif self.bbox_norm_type == 'stride': bbox_pred = scale( self.vfnet_reg(reg_feat_init)).float().exp() * stride else: raise NotImplementedError # compute star deformable convolution offsets # converting dcn_offset to reg_feat.dtype thus VFNet can be # trained with FP16 dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul, stride).to(reg_feat.dtype) # refine the bbox_pred reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset)) bbox_pred_refine = scale_refine( self.vfnet_reg_refine(reg_feat)).float().exp() bbox_pred_refine = bbox_pred_refine * bbox_pred.detach() # predict the iou-aware cls score cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset)) cls_score = self.vfnet_cls(cls_feat) if self.training: return cls_score, bbox_pred, bbox_pred_refine else: return cls_score, bbox_pred_refine def star_dcn_offset(self, bbox_pred, gradient_mul, stride): """Compute the star deformable conv offsets. Args: bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b). gradient_mul (float): Gradient multiplier. stride (int): The corresponding stride for feature maps, used to project the bbox onto the feature map. Returns: dcn_offsets (Tensor): The offsets for deformable convolution. """ dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred) bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \ gradient_mul * bbox_pred # map to the feature map scale bbox_pred_grad_mul = bbox_pred_grad_mul / stride N, C, H, W = bbox_pred.size() x1 = bbox_pred_grad_mul[:, 0, :, :] y1 = bbox_pred_grad_mul[:, 1, :, :] x2 = bbox_pred_grad_mul[:, 2, :, :] y2 = bbox_pred_grad_mul[:, 3, :, :] bbox_pred_grad_mul_offset = bbox_pred.new_zeros( N, 2 * self.num_dconv_points, H, W) bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1 bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1 bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1 bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1 bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2 bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1 bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2 bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2 bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1 bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2 bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2 bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2 dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset return dcn_offset @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine')) def loss(self, cls_scores, bbox_preds, bbox_preds_refine, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box iou-aware scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. bbox_preds_refine (list[Tensor]): Refined Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Default: None. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.fcos_prior_generator.grid_priors( featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) labels, label_weights, bbox_targets, bbox_weights = self.get_targets( cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore) num_imgs = cls_scores[0].size(0) # flatten cls_scores, bbox_preds and bbox_preds_refine flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels).contiguous() for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() for bbox_pred in bbox_preds ] flatten_bbox_preds_refine = [ bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() for bbox_pred_refine in bbox_preds_refine ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = torch.where( ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0] num_pos = len(pos_inds) pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds] pos_labels = flatten_labels[pos_inds] # sync num_pos across all gpus if self.sync_num_pos: num_pos_avg_per_gpu = reduce_mean( pos_inds.new_tensor(num_pos).float()).item() num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0) else: num_pos_avg_per_gpu = num_pos pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = self.bbox_coder.decode( pos_points, pos_bbox_preds) pos_decoded_target_preds = self.bbox_coder.decode( pos_points, pos_bbox_targets) iou_targets_ini = bbox_overlaps( pos_decoded_bbox_preds, pos_decoded_target_preds.detach(), is_aligned=True).clamp(min=1e-6) bbox_weights_ini = iou_targets_ini.clone().detach() bbox_avg_factor_ini = reduce_mean( bbox_weights_ini.sum()).clamp_(min=1).item() pos_decoded_bbox_preds_refine = \ self.bbox_coder.decode(pos_points, pos_bbox_preds_refine) iou_targets_rf = bbox_overlaps( pos_decoded_bbox_preds_refine, pos_decoded_target_preds.detach(), is_aligned=True).clamp(min=1e-6) bbox_weights_rf = iou_targets_rf.clone().detach() bbox_avg_factor_rf = reduce_mean( bbox_weights_rf.sum()).clamp_(min=1).item() if num_pos > 0: loss_bbox = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds.detach(), weight=bbox_weights_ini, avg_factor=bbox_avg_factor_ini) loss_bbox_refine = self.loss_bbox_refine( pos_decoded_bbox_preds_refine, pos_decoded_target_preds.detach(), weight=bbox_weights_rf, avg_factor=bbox_avg_factor_rf) # build IoU-aware cls_score targets if self.use_vfl: pos_ious = iou_targets_rf.clone().detach() cls_iou_targets = torch.zeros_like(flatten_cls_scores) cls_iou_targets[pos_inds, pos_labels] = pos_ious else: 
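# no positive samples in this batch: fall back to zero-valued bbox losses that stay attached
# to the computation graph and, when Varifocal Loss is used, to all-zero IoU-aware targets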
loss_bbox = pos_bbox_preds.sum() * 0 loss_bbox_refine = pos_bbox_preds_refine.sum() * 0 if self.use_vfl: cls_iou_targets = torch.zeros_like(flatten_cls_scores) if self.use_vfl: loss_cls = self.loss_cls( flatten_cls_scores, cls_iou_targets, avg_factor=num_pos_avg_per_gpu) else: loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, weight=label_weights, avg_factor=num_pos_avg_per_gpu) return dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_bbox_rf=loss_bbox_refine) def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore): """A wrapper for computing ATSS and FCOS targets for points in multiple images. Args: cls_scores (list[Tensor]): Box iou-aware scores for each scale level with shape (N, num_points * num_classes, H, W). mlvl_points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). Returns: tuple: labels_list (list[Tensor]): Labels of each level. label_weights (Tensor/None): Label weights of all levels. bbox_targets_list (list[Tensor]): Regression targets of each level, (l, t, r, b). bbox_weights (Tensor/None): Bbox weights of all levels. """ if self.use_atss: return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore) else: self.norm_on_bbox = False return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels) def _get_target_single(self, *args, **kwargs): """Avoid ambiguity in multiple inheritance.""" if self.use_atss: return ATSSHead._get_target_single(self, *args, **kwargs) else: return FCOSHead._get_target_single(self, *args, **kwargs) def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list): """Compute FCOS regression and classification targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels_list (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). Returns: tuple: labels (list[Tensor]): Labels of each level. label_weights: None, to be compatible with ATSS targets. bbox_targets (list[Tensor]): BBox targets of each level. bbox_weights: None, to be compatible with ATSS targets. """ labels, bbox_targets = FCOSHead.get_targets(self, points, gt_bboxes_list, gt_labels_list) label_weights = None bbox_weights = None return labels, label_weights, bbox_targets, bbox_weights def get_anchors(self, featmap_sizes, img_metas, device='cuda'): """Get anchors according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): Device for returned tensors Returns: tuple: anchor_list (list[Tensor]): Anchors of each image. valid_flag_list (list[Tensor]): Valid flags of each image. 
""" num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # anchors for one time multi_level_anchors = self.atss_prior_generator.grid_priors( featmap_sizes, device=device) anchor_list = [multi_level_anchors for _ in range(num_imgs)] # for each image, we compute valid flags of multi level anchors valid_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = self.atss_prior_generator.valid_flags( featmap_sizes, img_meta['pad_shape'], device=device) valid_flag_list.append(multi_level_flags) return anchor_list, valid_flag_list def get_atss_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """A wrapper for computing ATSS targets for points in multiple images. Args: cls_scores (list[Tensor]): Box iou-aware scores for each scale level with shape (N, num_points * num_classes, H, W). mlvl_points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). Default: None. Returns: tuple: labels_list (list[Tensor]): Labels of each level. label_weights (Tensor): Label weights of all levels. bbox_targets_list (list[Tensor]): Regression targets of each level, (l, t, r, b). bbox_weights (Tensor): Bbox weights of all levels. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len( featmap_sizes ) == self.atss_prior_generator.num_levels == \ self.fcos_prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = ATSSHead.get_targets( self, anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, unmap_outputs=True) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets bbox_targets_list = [ bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list ] num_imgs = len(img_metas) # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format bbox_targets_list = self.transform_bbox_targets( bbox_targets_list, mlvl_points, num_imgs) labels_list = [labels.reshape(-1) for labels in labels_list] label_weights_list = [ label_weights.reshape(-1) for label_weights in label_weights_list ] bbox_weights_list = [ bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list ] label_weights = torch.cat(label_weights_list) bbox_weights = torch.cat(bbox_weights_list) return labels_list, label_weights, bbox_targets_list, bbox_weights def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs): """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format. Args: decoded_bboxes (list[Tensor]): Regression targets of each level, in the form of (x1, y1, x2, y2). mlvl_points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). num_imgs (int): the number of images in a batch. Returns: bbox_targets (list[Tensor]): Regression targets of each level in the form of (l, t, r, b). 
""" # TODO: Re-implemented in Class PointCoder assert len(decoded_bboxes) == len(mlvl_points) num_levels = len(decoded_bboxes) mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points] bbox_targets = [] for i in range(num_levels): bbox_target = self.bbox_coder.encode(mlvl_points[i], decoded_bboxes[i]) bbox_targets.append(bbox_target) return bbox_targets def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): """Override the method in the parent class to avoid changing para's name.""" pass def _get_points_single(self, featmap_size, stride, dtype, device, flatten=False): """Get points according to feature map size. This function will be deprecated soon. """ warnings.warn( '`_get_points_single` in `VFNetHead` will be ' 'deprecated soon, we support a multi level point generator now' 'you can get points of a single level feature map' 'with `self.fcos_prior_generator.single_level_grid_priors` ') h, w = featmap_size x_range = torch.arange( 0, w * stride, stride, dtype=dtype, device=device) y_range = torch.arange( 0, h * stride, stride, dtype=dtype, device=device) y, x = torch.meshgrid(y_range, x_range) # to be compatible with anchor points in ATSS if self.use_atss: points = torch.stack( (x.reshape(-1), y.reshape(-1)), dim=-1) + \ stride * self.anchor_center_offset else: points = torch.stack( (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2 return points
31,290
41.22807
79
py
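The transform_bbox_targets step in the VFNet head above relies on a distance-point style encoder (self.bbox_coder.encode) to turn (x1, y1, x2, y2) targets into per-point (l, t, r, b) distances. A minimal standalone sketch of that encoding, assuming the usual FCOS-style convention; the helper name encode_ltrb is illustrative and not part of the file above:

import torch


def encode_ltrb(points, bboxes):
    # points: (N, 2) grid centers (x, y); bboxes: (N, 4) targets in
    # (x1, y1, x2, y2) format. Returns (N, 4) distances (l, t, r, b)
    # from each point to the four sides of its target box.
    left = points[:, 0] - bboxes[:, 0]
    top = points[:, 1] - bboxes[:, 1]
    right = bboxes[:, 2] - points[:, 0]
    bottom = bboxes[:, 3] - points[:, 1]
    return torch.stack([left, top, right, bottom], dim=-1)


points = torch.tensor([[12., 12.], [20., 28.]])
bboxes = torch.tensor([[4., 4., 28., 28.], [4., 4., 28., 28.]])
print(encode_ltrb(points, bboxes))  # each row is (l, t, r, b) for one point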
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/centernet_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import bias_init_with_prob, normal_init from mmcv.ops import batched_nms from mmcv.runner import force_fp32 from mmdet.core import multi_apply from mmdet.models import HEADS, build_loss from mmdet.models.utils import gaussian_radius, gen_gaussian_target from ..utils.gaussian_target import (get_local_maximum, get_topk_from_heatmap, transpose_and_gather_feat) from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class CenterNetHead(BaseDenseHead, BBoxTestMixin): """Objects as Points Head. CenterHead use center_point to indicate object's position. Paper link <https://arxiv.org/abs/1904.07850> Args: in_channel (int): Number of channel in the input feature map. feat_channel (int): Number of channel in the intermediate feature map. num_classes (int): Number of categories excluding the background category. loss_center_heatmap (dict | None): Config of center heatmap loss. Default: GaussianFocalLoss. loss_wh (dict | None): Config of wh loss. Default: L1Loss. loss_offset (dict | None): Config of offset loss. Default: L1Loss. train_cfg (dict | None): Training config. Useless in CenterNet, but we keep this variable for SingleStageDetector. Default: None. test_cfg (dict | None): Testing config of CenterNet. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channel, feat_channel, num_classes, loss_center_heatmap=dict( type='GaussianFocalLoss', loss_weight=1.0), loss_wh=dict(type='L1Loss', loss_weight=0.1), loss_offset=dict(type='L1Loss', loss_weight=1.0), train_cfg=None, test_cfg=None, init_cfg=None): super(CenterNetHead, self).__init__(init_cfg) self.num_classes = num_classes self.heatmap_head = self._build_head(in_channel, feat_channel, num_classes) self.wh_head = self._build_head(in_channel, feat_channel, 2) self.offset_head = self._build_head(in_channel, feat_channel, 2) self.loss_center_heatmap = build_loss(loss_center_heatmap) self.loss_wh = build_loss(loss_wh) self.loss_offset = build_loss(loss_offset) self.train_cfg = train_cfg self.test_cfg = test_cfg self.fp16_enabled = False def _build_head(self, in_channel, feat_channel, out_channel): """Build head for each branch.""" layer = nn.Sequential( nn.Conv2d(in_channel, feat_channel, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(feat_channel, out_channel, kernel_size=1)) return layer def init_weights(self): """Initialize weights of the head.""" bias_init = bias_init_with_prob(0.1) self.heatmap_head[-1].bias.data.fill_(bias_init) for head in [self.wh_head, self.offset_head]: for m in head.modules(): if isinstance(m, nn.Conv2d): normal_init(m, std=0.001) def forward(self, feats): """Forward features. Notice CenterNet head does not use FPN. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: center_heatmap_preds (List[Tensor]): center predict heatmaps for all levels, the channels number is num_classes. wh_preds (List[Tensor]): wh predicts for all levels, the channels number is 2. offset_preds (List[Tensor]): offset predicts for all levels, the channels number is 2. """ return multi_apply(self.forward_single, feats) def forward_single(self, feat): """Forward feature of a single level. Args: feat (Tensor): Feature of a single level. Returns: center_heatmap_pred (Tensor): center predict heatmaps, the channels number is num_classes. 
wh_pred (Tensor): wh predicts, the channels number is 2. offset_pred (Tensor): offset predicts, the channels number is 2. """ center_heatmap_pred = self.heatmap_head(feat).sigmoid() wh_pred = self.wh_head(feat) offset_pred = self.offset_head(feat) return center_heatmap_pred, wh_pred, offset_pred @force_fp32(apply_to=('center_heatmap_preds', 'wh_preds', 'offset_preds')) def loss(self, center_heatmap_preds, wh_preds, offset_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: center_heatmap_preds (list[Tensor]): center predict heatmaps for all levels with shape (B, num_classes, H, W). wh_preds (list[Tensor]): wh predicts for all levels with shape (B, 2, H, W). offset_preds (list[Tensor]): offset predicts for all levels with shape (B, 2, H, W). gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Default: None Returns: dict[str, Tensor]: which has components below: - loss_center_heatmap (Tensor): loss of center heatmap. - loss_wh (Tensor): loss of hw heatmap - loss_offset (Tensor): loss of offset heatmap. """ assert len(center_heatmap_preds) == len(wh_preds) == len( offset_preds) == 1 center_heatmap_pred = center_heatmap_preds[0] wh_pred = wh_preds[0] offset_pred = offset_preds[0] target_result, avg_factor = self.get_targets(gt_bboxes, gt_labels, center_heatmap_pred.shape, img_metas[0]['pad_shape']) center_heatmap_target = target_result['center_heatmap_target'] wh_target = target_result['wh_target'] offset_target = target_result['offset_target'] wh_offset_target_weight = target_result['wh_offset_target_weight'] # Since the channel of wh_target and offset_target is 2, the avg_factor # of loss_center_heatmap is always 1/2 of loss_wh and loss_offset. loss_center_heatmap = self.loss_center_heatmap( center_heatmap_pred, center_heatmap_target, avg_factor=avg_factor) loss_wh = self.loss_wh( wh_pred, wh_target, wh_offset_target_weight, avg_factor=avg_factor * 2) loss_offset = self.loss_offset( offset_pred, offset_target, wh_offset_target_weight, avg_factor=avg_factor * 2) return dict( loss_center_heatmap=loss_center_heatmap, loss_wh=loss_wh, loss_offset=loss_offset) def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape): """Compute regression and classification targets in multiple images. Args: gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box. feat_shape (list[int]): feature map shape with value [B, _, H, W] img_shape (list[int]): image shape in [h, w] format. Returns: tuple[dict,float]: The float value is mean avg_factor, the dict has components below: - center_heatmap_target (Tensor): targets of center heatmap, \ shape (B, num_classes, H, W). - wh_target (Tensor): targets of wh predict, shape \ (B, 2, H, W). - offset_target (Tensor): targets of offset predict, shape \ (B, 2, H, W). - wh_offset_target_weight (Tensor): weights of wh and offset \ predict, shape (B, 2, H, W). 
""" img_h, img_w = img_shape[:2] bs, _, feat_h, feat_w = feat_shape width_ratio = float(feat_w / img_w) height_ratio = float(feat_h / img_h) center_heatmap_target = gt_bboxes[-1].new_zeros( [bs, self.num_classes, feat_h, feat_w]) wh_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w]) offset_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w]) wh_offset_target_weight = gt_bboxes[-1].new_zeros( [bs, 2, feat_h, feat_w]) for batch_id in range(bs): gt_bbox = gt_bboxes[batch_id] gt_label = gt_labels[batch_id] center_x = (gt_bbox[:, [0]] + gt_bbox[:, [2]]) * width_ratio / 2 center_y = (gt_bbox[:, [1]] + gt_bbox[:, [3]]) * height_ratio / 2 gt_centers = torch.cat((center_x, center_y), dim=1) for j, ct in enumerate(gt_centers): ctx_int, cty_int = ct.int() ctx, cty = ct scale_box_h = (gt_bbox[j][3] - gt_bbox[j][1]) * height_ratio scale_box_w = (gt_bbox[j][2] - gt_bbox[j][0]) * width_ratio radius = gaussian_radius([scale_box_h, scale_box_w], min_overlap=0.3) radius = max(0, int(radius)) ind = gt_label[j] gen_gaussian_target(center_heatmap_target[batch_id, ind], [ctx_int, cty_int], radius) wh_target[batch_id, 0, cty_int, ctx_int] = scale_box_w wh_target[batch_id, 1, cty_int, ctx_int] = scale_box_h offset_target[batch_id, 0, cty_int, ctx_int] = ctx - ctx_int offset_target[batch_id, 1, cty_int, ctx_int] = cty - cty_int wh_offset_target_weight[batch_id, :, cty_int, ctx_int] = 1 avg_factor = max(1, center_heatmap_target.eq(1).sum()) target_result = dict( center_heatmap_target=center_heatmap_target, wh_target=wh_target, offset_target=offset_target, wh_offset_target_weight=wh_offset_target_weight) return target_result, avg_factor @force_fp32(apply_to=('center_heatmap_preds', 'wh_preds', 'offset_preds')) def get_bboxes(self, center_heatmap_preds, wh_preds, offset_preds, img_metas, rescale=True, with_nms=False): """Transform network output for a batch into bbox predictions. Args: center_heatmap_preds (list[Tensor]): Center predict heatmaps for all levels with shape (B, num_classes, H, W). wh_preds (list[Tensor]): WH predicts for all levels with shape (B, 2, H, W). offset_preds (list[Tensor]): Offset predicts for all levels with shape (B, 2, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: True. with_nms (bool): If True, do nms before return boxes. Default: False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is an (n, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. The shape of the second tensor in the tuple is (n,), and each element represents the class label of the corresponding box. """ assert len(center_heatmap_preds) == len(wh_preds) == len( offset_preds) == 1 result_list = [] for img_id in range(len(img_metas)): result_list.append( self._get_bboxes_single( center_heatmap_preds[0][img_id:img_id + 1, ...], wh_preds[0][img_id:img_id + 1, ...], offset_preds[0][img_id:img_id + 1, ...], img_metas[img_id], rescale=rescale, with_nms=with_nms)) return result_list def _get_bboxes_single(self, center_heatmap_pred, wh_pred, offset_pred, img_meta, rescale=False, with_nms=True): """Transform outputs of a single image into bbox results. Args: center_heatmap_pred (Tensor): Center heatmap for current level with shape (1, num_classes, H, W). wh_pred (Tensor): WH heatmap for current level with shape (1, num_classes, H, W). 
offset_pred (Tensor): Offset for current level with shape (1, corner_offset_channels, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor, Tensor]: The first item is an (n, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. The shape of the second tensor in the tuple is (n,), and each element represents the class label of the corresponding box. """ batch_det_bboxes, batch_labels = self.decode_heatmap( center_heatmap_pred, wh_pred, offset_pred, img_meta['batch_input_shape'], k=self.test_cfg.topk, kernel=self.test_cfg.local_maximum_kernel) det_bboxes = batch_det_bboxes.view([-1, 5]) det_labels = batch_labels.view(-1) batch_border = det_bboxes.new_tensor(img_meta['border'])[..., [2, 0, 2, 0]] det_bboxes[..., :4] -= batch_border if rescale: det_bboxes[..., :4] /= det_bboxes.new_tensor( img_meta['scale_factor']) if with_nms: det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels, self.test_cfg) return det_bboxes, det_labels def decode_heatmap(self, center_heatmap_pred, wh_pred, offset_pred, img_shape, k=100, kernel=3): """Transform outputs into detections raw bbox prediction. Args: center_heatmap_pred (Tensor): center predict heatmap, shape (B, num_classes, H, W). wh_pred (Tensor): wh predict, shape (B, 2, H, W). offset_pred (Tensor): offset predict, shape (B, 2, H, W). img_shape (list[int]): image shape in [h, w] format. k (int): Get top k center keypoints from heatmap. Default 100. kernel (int): Max pooling kernel for extract local maximum pixels. Default 3. Returns: tuple[torch.Tensor]: Decoded output of CenterNetHead, containing the following Tensors: - batch_bboxes (Tensor): Coords of each box with shape (B, k, 5) - batch_topk_labels (Tensor): Categories of each box with \ shape (B, k) """ height, width = center_heatmap_pred.shape[2:] inp_h, inp_w = img_shape center_heatmap_pred = get_local_maximum( center_heatmap_pred, kernel=kernel) *batch_dets, topk_ys, topk_xs = get_topk_from_heatmap( center_heatmap_pred, k=k) batch_scores, batch_index, batch_topk_labels = batch_dets wh = transpose_and_gather_feat(wh_pred, batch_index) offset = transpose_and_gather_feat(offset_pred, batch_index) topk_xs = topk_xs + offset[..., 0] topk_ys = topk_ys + offset[..., 1] tl_x = (topk_xs - wh[..., 0] / 2) * (inp_w / width) tl_y = (topk_ys - wh[..., 1] / 2) * (inp_h / height) br_x = (topk_xs + wh[..., 0] / 2) * (inp_w / width) br_y = (topk_ys + wh[..., 1] / 2) * (inp_h / height) batch_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=2) batch_bboxes = torch.cat((batch_bboxes, batch_scores[..., None]), dim=-1) return batch_bboxes, batch_topk_labels def _bboxes_nms(self, bboxes, labels, cfg): if labels.numel() > 0: max_num = cfg.max_per_img bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1].contiguous(), labels, cfg.nms) if max_num > 0: bboxes = bboxes[:max_num] labels = labels[keep][:max_num] return bboxes, labels
18,025
42.646489
79
py
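For reference, the center, offset and wh targets built in CenterNetHead.get_targets above reduce to a simple per-box mapping onto the downsampled feature map. A minimal sketch of that arithmetic for a single ground-truth box, with toy sizes and the Gaussian splatting omitted:

import torch

# Toy setup: a 64x64 input image and a 16x16 feature map (stride 4).
img_h, img_w, feat_h, feat_w = 64, 64, 16, 16
width_ratio, height_ratio = feat_w / img_w, feat_h / img_h

gt_bbox = torch.tensor([10., 18., 42., 50.])  # (x1, y1, x2, y2) in image coords
ctx = (gt_bbox[0] + gt_bbox[2]) / 2 * width_ratio   # center x on the feature map
cty = (gt_bbox[1] + gt_bbox[3]) / 2 * height_ratio  # center y on the feature map
ctx_int, cty_int = int(ctx), int(cty)               # cell that receives the peak

wh_target = ((gt_bbox[2] - gt_bbox[0]) * width_ratio,
             (gt_bbox[3] - gt_bbox[1]) * height_ratio)       # box size in feature cells
offset_target = (float(ctx) - ctx_int, float(cty) - cty_int)  # sub-cell remainder

print(ctx_int, cty_int, wh_target, offset_target)  # 6 8 (8.0, 8.0) (0.5, 0.5)

decode_heatmap then inverts exactly this mapping: top-k peaks plus the predicted offset give the center, the predicted wh gives the extent, and multiplying by (inp_w / width, inp_h / height) maps the box back to input coordinates.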
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/fsaf_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply, unmap) from ..builder import HEADS from ..losses.accuracy import accuracy from ..losses.utils import weight_reduce_loss from .retina_head import RetinaHead @HEADS.register_module() class FSAFHead(RetinaHead): """Anchor-free head used in `FSAF <https://arxiv.org/abs/1903.00621>`_. The head contains two subnetworks. The first classifies anchor boxes and the second regresses deltas for the anchors (num_anchors is 1 for anchor- free methods) Args: *args: Same as its base class in :class:`RetinaHead` score_threshold (float, optional): The score_threshold to calculate positive recall. If given, prediction scores lower than this value is counted as incorrect prediction. Default to None. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None **kwargs: Same as its base class in :class:`RetinaHead` Example: >>> import torch >>> self = FSAFHead(11, 7) >>> x = torch.rand(1, 7, 32, 32) >>> cls_score, bbox_pred = self.forward_single(x) >>> # Each anchor predicts a score for each class except background >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors >>> assert cls_per_anchor == self.num_classes >>> assert box_per_anchor == 4 """ def __init__(self, *args, score_threshold=None, init_cfg=None, **kwargs): # The positive bias in self.retina_reg conv is to prevent predicted \ # bbox with 0 area if init_cfg is None: init_cfg = dict( type='Normal', layer='Conv2d', std=0.01, override=[ dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01), dict( type='Normal', name='retina_reg', std=0.01, bias=0.25) ]) super().__init__(*args, init_cfg=init_cfg, **kwargs) self.score_threshold = score_threshold def forward_single(self, x): """Forward feature map of a single scale level. Args: x (Tensor): Feature map of a single scale level. Returns: tuple (Tensor): cls_score (Tensor): Box scores for each scale level Has shape (N, num_points * num_classes, H, W). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_points * 4, H, W). """ cls_score, bbox_pred = super().forward_single(x) # relu: TBLR encoder only accepts positive bbox_pred return cls_score, self.relu(bbox_pred) def _get_targets_single(self, flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. Most of the codes are the same with the base class :obj: `AnchorHead`, except that it also collects and returns the matched gt index in the image (from 0 to num_gt-1). If the anchor bbox is not matched to any gt, the corresponding value in pos_gt_inds is -1. 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # Assign gt and sample anchors anchors = flat_anchors[inside_flags.type(torch.bool), :] assign_result = self.assigner.assign( anchors, gt_bboxes, gt_bboxes_ignore, None if self.sampling else gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros((num_valid_anchors, label_channels), dtype=torch.float) pos_gt_inds = anchors.new_full((num_valid_anchors, ), -1, dtype=torch.long) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) else: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, both # the predicted boxes and regression targets should be with # absolute coordinate format. pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 # The assigned gt_index for each anchor. (0-based) pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # shadowed_labels is a tensor composed of tuples # (anchor_inds, class_label) that indicate those anchors lying in the # outer region of a gt or overlapped by another gt with a smaller # area. # # Therefore, only the shadowed labels are ignored for loss calculation. # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner` shadowed_labels = assign_result.get_extra_property('shadowed_labels') if shadowed_labels is not None and shadowed_labels.numel(): if len(shadowed_labels.shape) == 2: idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1] assert (labels[idx_] != label_).all(), \ 'One label cannot be both positive and ignored' label_weights[idx_, label_] = 0 else: label_weights[shadowed_labels] = 0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap(labels, num_total_anchors, inside_flags) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) pos_gt_inds = unmap( pos_gt_inds, num_total_anchors, inside_flags, fill=-1) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result, pos_gt_inds) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_points * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_points * 4, H, W). 
gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ for i in range(len(bbox_preds)): # loop over fpn level # avoid 0 area of the predicted bbox bbox_preds[i] = bbox_preds[i].clamp(min=1e-4) # TODO: It may directly use the base-class loss function. featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels batch_size = len(gt_bboxes) device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, pos_assigned_gt_inds_list) = cls_reg_targets num_gts = np.array(list(map(len, gt_labels))) num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned # gt index of each anchor bbox in each fpn level. cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size for i, assign in enumerate(pos_assigned_gt_inds_list): # loop over fpn levels for j in range(1, batch_size): # loop over batch size # Convert gt indices in each img to those in the batch assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1]) pos_assigned_gt_inds_list[i] = assign.flatten() labels_list[i] = labels_list[i].flatten() num_gts = sum(map(len, gt_labels)) # total number of gt in the batch # The unique label index of each gt in the batch label_sequence = torch.arange(num_gts, device=device) # Collect the average loss of each gt in each level with torch.no_grad(): loss_levels, = multi_apply( self.collect_loss_level_single, losses_cls, losses_bbox, pos_assigned_gt_inds_list, labels_seq=label_sequence) # Shape: (fpn_levels, num_gts). Loss of each gt at each fpn level loss_levels = torch.stack(loss_levels, dim=0) # Locate the best fpn level for loss back-propagation if loss_levels.numel() == 0: # zero gt argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long) else: _, argmin = loss_levels.min(dim=0) # Reweight the loss of each (anchor, label) pair, so that only those # at the best gt level are back-propagated. 
losses_cls, losses_bbox, pos_inds = multi_apply( self.reweight_loss_single, losses_cls, losses_bbox, pos_assigned_gt_inds_list, labels_list, list(range(len(losses_cls))), min_levels=argmin) num_pos = torch.cat(pos_inds, 0).sum().float() pos_recall = self.calculate_pos_recall(cls_scores, labels_list, pos_inds) if num_pos == 0: # No gt avg_factor = num_pos + float(num_total_neg) else: avg_factor = num_pos for i in range(len(losses_cls)): losses_cls[i] /= avg_factor losses_bbox[i] /= avg_factor return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, num_pos=num_pos / batch_size, pos_recall=pos_recall) def calculate_pos_recall(self, cls_scores, labels_list, pos_inds): """Calculate positive recall with score threshold. Args: cls_scores (list[Tensor]): Classification scores at all fpn levels. Each tensor is in shape (N, num_classes * num_anchors, H, W) labels_list (list[Tensor]): The label that each anchor is assigned to. Shape (N * H * W * num_anchors, ) pos_inds (list[Tensor]): List of bool tensors indicating whether the anchor is assigned to a positive label. Shape (N * H * W * num_anchors, ) Returns: Tensor: A single float number indicating the positive recall. """ with torch.no_grad(): num_class = self.num_classes scores = [ cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos] for cls, pos in zip(cls_scores, pos_inds) ] labels = [ label.reshape(-1)[pos] for label, pos in zip(labels_list, pos_inds) ] scores = torch.cat(scores, dim=0) labels = torch.cat(labels, dim=0) if self.use_sigmoid_cls: scores = scores.sigmoid() else: scores = scores.softmax(dim=1) return accuracy(scores, labels, thresh=self.score_threshold) def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds, labels_seq): """Get the average loss in each FPN level w.r.t. each gt label. Args: cls_loss (Tensor): Classification loss of each feature map pixel, shape (num_anchor, num_class) reg_loss (Tensor): Regression loss of each feature map pixel, shape (num_anchor, 4) assigned_gt_inds (Tensor): It indicates which gt the prior is assigned to (0-based, -1: no assignment). shape (num_anchor), labels_seq: The rank of labels. shape (num_gt) Returns: shape: (num_gt), average loss of each gt in this level """ if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4) reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims if len(cls_loss.shape) == 2: cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims loss = cls_loss + reg_loss assert loss.size(0) == assigned_gt_inds.size(0) # Default loss value is 1e6 for a layer where no anchor is positive # to ensure it will not be chosen to back-propagate gradient losses_ = loss.new_full(labels_seq.shape, 1e6) for i, l in enumerate(labels_seq): match = assigned_gt_inds == l if match.any(): losses_[i] = loss[match].mean() return losses_, def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds, labels, level, min_levels): """Reweight loss values at each level. Reassign loss values at each level by masking those where the pre-calculated loss is too large. Then return the reduced losses. Args: cls_loss (Tensor): Element-wise classification loss. Shape: (num_anchors, num_classes) reg_loss (Tensor): Element-wise regression loss. Shape: (num_anchors, 4) assigned_gt_inds (Tensor): The gt indices that each anchor bbox is assigned to. -1 denotes a negative anchor, otherwise it is the gt index (0-based). Shape: (num_anchors, ), labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ). 
level (int): The current level index in the pyramid (0-4 for RetinaNet) min_levels (Tensor): The best-matching level for each gt. Shape: (num_gts, ), Returns: tuple: - cls_loss: Reduced corrected classification loss. Scalar. - reg_loss: Reduced corrected regression loss. Scalar. - pos_flags (Tensor): Corrected bool tensor indicating the final positive anchors. Shape: (num_anchors, ). """ loc_weight = torch.ones_like(reg_loss) cls_weight = torch.ones_like(cls_loss) pos_flags = assigned_gt_inds >= 0 # positive pixel flag pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten() if pos_flags.any(): # pos pixels exist pos_assigned_gt_inds = assigned_gt_inds[pos_flags] zeroing_indices = (min_levels[pos_assigned_gt_inds] != level) neg_indices = pos_indices[zeroing_indices] if neg_indices.numel(): pos_flags[neg_indices] = 0 loc_weight[neg_indices] = 0 # Only the weight corresponding to the label is # zeroed out if not selected zeroing_labels = labels[neg_indices] assert (zeroing_labels >= 0).all() cls_weight[neg_indices, zeroing_labels] = 0 # Weighted loss for both cls and reg loss cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum') reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum') return cls_loss, reg_loss, pos_flags
19,337
43.557604
79
py
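The online feature selection inside the FSAF loss above boils down to stacking the per-gt loss of every FPN level and keeping, for each gt, the level with the smallest loss; positives assigned to that gt at every other level are then zeroed out by reweight_loss_single. A minimal sketch of the selection step with toy numbers:

import torch

# loss_levels[i, j]: summed cls+reg loss of gt j at FPN level i
# (1e6 marks levels where the gt has no positive anchor).
loss_levels = torch.tensor([
    [0.9, 1e6, 0.4],
    [0.5, 0.7, 0.6],
    [0.8, 0.3, 1e6],
])
_, argmin = loss_levels.min(dim=0)  # best level index per gt
print(argmin)                       # tensor([1, 2, 0])

# At level `lvl`, positives whose assigned gt prefers another level are dropped.
lvl = 0
assigned_gt_inds = torch.tensor([2, -1, 0, 1])  # per-anchor gt index, -1 = negative
pos = assigned_gt_inds >= 0
keep = pos.clone()
keep[pos] = argmin[assigned_gt_inds[pos]] == lvl
print(keep)  # only the anchor matched to gt 2 stays positive at level 0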
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/atss_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler, images_to_levels, multi_apply, reduce_mean, unmap) from ..builder import HEADS, build_loss from .anchor_head import AnchorHead @HEADS.register_module() class ATSSHead(AnchorHead): """Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection. ATSS head structure is similar with FCOS, however ATSS use anchor boxes and assign label by Adaptive Training Sample Selection instead max-iou. https://arxiv.org/abs/1912.02424 """ def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), reg_decoded_bbox=True, loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='atss_cls', std=0.01, bias_prob=0.01)), **kwargs): self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(ATSSHead, self).__init__( num_classes, in_channels, reg_decoded_bbox=reg_decoded_bbox, init_cfg=init_cfg, **kwargs) self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # SSD sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.loss_centerness = build_loss(loss_centerness) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.atss_cls = nn.Conv2d( self.feat_channels, self.num_anchors * self.cls_out_channels, 3, padding=1) self.atss_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.atss_centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. """ return multi_apply(self.forward_single, feats, self.scales) def forward_single(self, x, scale): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level the channels number is num_anchors * num_classes. bbox_pred (Tensor): Box energies / deltas for a single scale level, the channels number is num_anchors * 4. 
centerness (Tensor): Centerness for a single scale level, the channel number is (N, num_anchors * 1, H, W). """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.atss_cls(cls_feat) # we just follow atss, not apply exp in bbox_pred bbox_pred = scale(self.atss_reg(reg_feat)).float() centerness = self.atss_centerness(reg_feat) return cls_score, bbox_pred, centerness def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels, label_weights, bbox_targets, num_total_samples): """Compute loss of a single scale level. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). num_total_samples (int): Number os positive samples that is reduced over all GPUs. Returns: dict[str, Tensor]: A dictionary of loss components. """ anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape( -1, self.cls_out_channels).contiguous() bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) centerness = centerness.permute(0, 2, 3, 1).reshape(-1) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # classification loss loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_centerness = centerness[pos_inds] centerness_targets = self.centerness_target( pos_anchors, pos_bbox_targets) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchors, pos_bbox_pred) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_bbox_targets, weight=centerness_targets, avg_factor=1.0) # centerness loss loss_centerness = self.loss_centerness( pos_centerness, centerness_targets, avg_factor=num_total_samples) else: loss_bbox = bbox_pred.sum() * 0 loss_centerness = centerness.sum() * 0 centerness_targets = bbox_targets.new_tensor(0.) return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum() @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) def loss(self, cls_scores, bbox_preds, centernesses, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) centernesses (list[Tensor]): Centerness for each scale level with shape (N, num_anchors * 1, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos, dtype=torch.float, device=device)).item() num_total_samples = max(num_total_samples, 1.0) losses_cls, losses_bbox, loss_centerness,\ bbox_avg_factor = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, centernesses, labels_list, label_weights_list, bbox_targets_list, num_total_samples=num_total_samples) bbox_avg_factor = sum(bbox_avg_factor) bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_centerness=loss_centerness) def centerness_target(self, anchors, gts): # only calculate pos centerness targets, otherwise there may be nan anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 l_ = anchors_cx - gts[:, 0] t_ = anchors_cy - gts[:, 1] r_ = gts[:, 2] - anchors_cx b_ = gts[:, 3] - anchors_cy left_right = torch.stack([l_, r_], dim=1) top_bottom = torch.stack([t_, b_], dim=1) centerness = torch.sqrt( (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])) assert not torch.isnan(centerness).any() return centerness def get_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Get targets for ATSS head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. 
""" num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, anchor_list, valid_flag_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, flat_anchors, valid_flags, num_level_anchors, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). num_level_anchors Tensor): Number of anchors of each scale level. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. labels (Tensor): Labels of all anchors in the image with shape (N,). label_weights (Tensor): Label weights of all anchor in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4) pos_inds (Tensor): Indices of positive anchor with shape (num_pos,). neg_inds (Tensor): Indices of negative anchor with shape (num_neg,). 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) assign_result = self.assigner.assign(anchors, num_level_anchors_inside, gt_bboxes, gt_bboxes_ignore, gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if self.reg_decoded_bbox: pos_bbox_targets = sampling_result.pos_gt_bboxes else: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class since v2.5.0 labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside
20,844
41.281947
79
py
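The centerness_target method in ATSSHead above computes the FCOS-style centerness sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))), measured from the anchor center to the sides of the matched gt box. A minimal standalone sketch of the same formula:

import torch


def centerness(anchors, gts):
    # anchors, gts: (N, 4) boxes in (x1, y1, x2, y2); centerness is derived
    # from the anchor center's distances to the four sides of the matched gt.
    cx = (anchors[:, 0] + anchors[:, 2]) / 2
    cy = (anchors[:, 1] + anchors[:, 3]) / 2
    l_, r_ = cx - gts[:, 0], gts[:, 2] - cx
    t_, b_ = cy - gts[:, 1], gts[:, 3] - cy
    lr = torch.stack([l_, r_], dim=1)
    tb = torch.stack([t_, b_], dim=1)
    return torch.sqrt((lr.min(dim=1)[0] / lr.max(dim=1)[0]) *
                      (tb.min(dim=1)[0] / tb.max(dim=1)[0]))


anchors = torch.tensor([[14., 14., 18., 18.]])  # center at (16, 16)
gts = torch.tensor([[8., 12., 32., 20.]])       # l=8, r=16, t=4, b=4
print(centerness(anchors, gts))                 # sqrt(8/16 * 4/4) ~= 0.707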
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/detr_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d, Linear, build_activation_layer from mmcv.cnn.bricks.transformer import FFN, build_positional_encoding from mmcv.runner import force_fp32 from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh, build_assigner, build_sampler, multi_apply, reduce_mean) from mmdet.models.utils import build_transformer from ..builder import HEADS, build_loss from .anchor_free_head import AnchorFreeHead @HEADS.register_module() class DETRHead(AnchorFreeHead): """Implements the DETR transformer head. See `paper: End-to-End Object Detection with Transformers <https://arxiv.org/pdf/2005.12872>`_ for details. Args: num_classes (int): Number of categories excluding the background. in_channels (int): Number of channels in the input feature map. num_query (int): Number of query in Transformer. num_reg_fcs (int, optional): Number of fully-connected layers used in `FFN`, which is then used for the regression head. Default 2. transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer. Default: None. sync_cls_avg_factor (bool): Whether to sync the avg_factor of all ranks. Default to False. positional_encoding (obj:`mmcv.ConfigDict`|dict): Config for position encoding. loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the classification loss. Default `CrossEntropyLoss`. loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the regression loss. Default `L1Loss`. loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the regression iou loss. Default `GIoULoss`. tran_cfg (obj:`mmcv.ConfigDict`|dict): Training config of transformer head. test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of transformer head. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ _version = 2 def __init__(self, num_classes, in_channels, num_query=100, num_reg_fcs=2, transformer=None, sync_cls_avg_factor=False, positional_encoding=dict( type='SinePositionalEncoding', num_feats=128, normalize=True), loss_cls=dict( type='CrossEntropyLoss', bg_cls_weight=0.1, use_sigmoid=False, loss_weight=1.0, class_weight=1.0), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0), train_cfg=dict( assigner=dict( type='HungarianAssigner', cls_cost=dict(type='ClassificationCost', weight=1.), reg_cost=dict(type='BBoxL1Cost', weight=5.0), iou_cost=dict( type='IoUCost', iou_mode='giou', weight=2.0))), test_cfg=dict(max_per_img=100), init_cfg=None, **kwargs): # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, # since it brings inconvenience when the initialization of # `AnchorFreeHead` is called. super(AnchorFreeHead, self).__init__(init_cfg) self.bg_cls_weight = 0 self.sync_cls_avg_factor = sync_cls_avg_factor class_weight = loss_cls.get('class_weight', None) if class_weight is not None and (self.__class__ is DETRHead): assert isinstance(class_weight, float), 'Expected ' \ 'class_weight to have type float. Found ' \ f'{type(class_weight)}.' # NOTE following the official DETR rep0, bg_cls_weight means # relative classification weight of the no-object class. bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight) assert isinstance(bg_cls_weight, float), 'Expected ' \ 'bg_cls_weight to have type float. Found ' \ f'{type(bg_cls_weight)}.' 
class_weight = torch.ones(num_classes + 1) * class_weight # set background class as the last indice class_weight[num_classes] = bg_cls_weight loss_cls.update({'class_weight': class_weight}) if 'bg_cls_weight' in loss_cls: loss_cls.pop('bg_cls_weight') self.bg_cls_weight = bg_cls_weight if train_cfg: assert 'assigner' in train_cfg, 'assigner should be provided '\ 'when train_cfg is set.' assigner = train_cfg['assigner'] assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \ 'The classification weight for loss and matcher should be' \ 'exactly the same.' assert loss_bbox['loss_weight'] == assigner['reg_cost'][ 'weight'], 'The regression L1 weight for loss and matcher ' \ 'should be exactly the same.' assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \ 'The regression iou weight for loss and matcher should be' \ 'exactly the same.' self.assigner = build_assigner(assigner) # DETR sampling=False, so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.num_query = num_query self.num_classes = num_classes self.in_channels = in_channels self.num_reg_fcs = num_reg_fcs self.train_cfg = train_cfg self.test_cfg = test_cfg self.fp16_enabled = False self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.loss_iou = build_loss(loss_iou) if self.loss_cls.use_sigmoid: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.act_cfg = transformer.get('act_cfg', dict(type='ReLU', inplace=True)) self.activate = build_activation_layer(self.act_cfg) self.positional_encoding = build_positional_encoding( positional_encoding) self.transformer = build_transformer(transformer) self.embed_dims = self.transformer.embed_dims assert 'num_feats' in positional_encoding num_feats = positional_encoding['num_feats'] assert num_feats * 2 == self.embed_dims, 'embed_dims should' \ f' be exactly 2 times of num_feats. Found {self.embed_dims}' \ f' and {num_feats}.' self._init_layers() def _init_layers(self): """Initialize layers of the transformer head.""" self.input_proj = Conv2d( self.in_channels, self.embed_dims, kernel_size=1) self.fc_cls = Linear(self.embed_dims, self.cls_out_channels) self.reg_ffn = FFN( self.embed_dims, self.embed_dims, self.num_reg_fcs, self.act_cfg, dropout=0.0, add_residual=False) self.fc_reg = Linear(self.embed_dims, 4) self.query_embedding = nn.Embedding(self.num_query, self.embed_dims) def init_weights(self): """Initialize weights of the transformer head.""" # The initialization for transformer is important self.transformer.init_weights() def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): """load checkpoints.""" # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, # since `AnchorFreeHead._load_from_state_dict` should not be # called here. Invoking the default `Module._load_from_state_dict` # is enough. # Names of some parameters in has been changed. version = local_metadata.get('version', None) if (version is None or version < 2) and self.__class__ is DETRHead: convert_dict = { '.self_attn.': '.attentions.0.', '.ffn.': '.ffns.0.', '.multihead_attn.': '.attentions.1.', '.decoder.norm.': '.decoder.post_norm.' 
} state_dict_keys = list(state_dict.keys()) for k in state_dict_keys: for ori_key, convert_key in convert_dict.items(): if ori_key in k: convert_key = k.replace(ori_key, convert_key) state_dict[convert_key] = state_dict[k] del state_dict[k] super(AnchorFreeHead, self)._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, feats, img_metas): """Forward function. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. - all_cls_scores_list (list[Tensor]): Classification scores \ for each scale level. Each is a 4D-tensor with shape \ [nb_dec, bs, num_query, cls_out_channels]. Note \ `cls_out_channels` should includes background. - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ outputs for each scale level. Each is a 4D-tensor with \ normalized coordinate format (cx, cy, w, h) and shape \ [nb_dec, bs, num_query, 4]. """ num_levels = len(feats) img_metas_list = [img_metas for _ in range(num_levels)] return multi_apply(self.forward_single, feats, img_metas_list) def forward_single(self, x, img_metas): """"Forward function for a single feature level. Args: x (Tensor): Input feature from backbone's single stage, shape [bs, c, h, w]. img_metas (list[dict]): List of image information. Returns: all_cls_scores (Tensor): Outputs from the classification head, shape [nb_dec, bs, num_query, cls_out_channels]. Note cls_out_channels should includes background. all_bbox_preds (Tensor): Sigmoid outputs from the regression head with normalized coordinate format (cx, cy, w, h). Shape [nb_dec, bs, num_query, 4]. """ # construct binary masks which used for the transformer. # NOTE following the official DETR repo, non-zero values representing # ignored positions, while zero values means valid positions. batch_size = x.size(0) input_img_h, input_img_w = img_metas[0]['batch_input_shape'] masks = x.new_ones((batch_size, input_img_h, input_img_w)) for img_id in range(batch_size): img_h, img_w, _ = img_metas[img_id]['img_shape'] masks[img_id, :img_h, :img_w] = 0 x = self.input_proj(x) # interpolate masks to have the same spatial shape with x masks = F.interpolate( masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) # position encoding pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w] # outs_dec: [nb_dec, bs, num_query, embed_dim] outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, pos_embed) all_cls_scores = self.fc_cls(outs_dec) all_bbox_preds = self.fc_reg(self.activate( self.reg_ffn(outs_dec))).sigmoid() return all_cls_scores, all_bbox_preds @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) def loss(self, all_cls_scores_list, all_bbox_preds_list, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore=None): """"Loss function. Only outputs from the last feature level are used for computing losses by default. Args: all_cls_scores_list (list[Tensor]): Classification outputs for each feature level. Each is a 4D-tensor with shape [nb_dec, bs, num_query, cls_out_channels]. all_bbox_preds_list (list[Tensor]): Sigmoid regression outputs for each feature level. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
gt_labels_list (list[Tensor]): Ground truth class indices for each image with shape (num_gts, ). img_metas (list[dict]): List of image meta information. gt_bboxes_ignore (list[Tensor], optional): Bounding boxes which can be ignored for each image. Default None. Returns: dict[str, Tensor]: A dictionary of loss components. """ # NOTE defaultly only the outputs from the last feature scale is used. all_cls_scores = all_cls_scores_list[-1] all_bbox_preds = all_bbox_preds_list[-1] assert gt_bboxes_ignore is None, \ 'Only supports for gt_bboxes_ignore setting to None.' num_dec_layers = len(all_cls_scores) all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] all_gt_bboxes_ignore_list = [ gt_bboxes_ignore for _ in range(num_dec_layers) ] img_metas_list = [img_metas for _ in range(num_dec_layers)] losses_cls, losses_bbox, losses_iou = multi_apply( self.loss_single, all_cls_scores, all_bbox_preds, all_gt_bboxes_list, all_gt_labels_list, img_metas_list, all_gt_bboxes_ignore_list) loss_dict = dict() # loss from the last decoder layer loss_dict['loss_cls'] = losses_cls[-1] loss_dict['loss_bbox'] = losses_bbox[-1] loss_dict['loss_iou'] = losses_iou[-1] # loss from other decoder layers num_dec_layer = 0 for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], losses_bbox[:-1], losses_iou[:-1]): loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i num_dec_layer += 1 return loss_dict def loss_single(self, cls_scores, bbox_preds, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list=None): """"Loss function for outputs from a single decoder layer of a single feature level. Args: cls_scores (Tensor): Box score logits from a single decoder layer for all images. Shape [bs, num_query, cls_out_channels]. bbox_preds (Tensor): Sigmoid outputs from a single decoder layer for all images, with normalized coordinate (cx, cy, w, h) and shape [bs, num_query, 4]. gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels_list (list[Tensor]): Ground truth class indices for each image with shape (num_gts, ). img_metas (list[dict]): List of image meta information. gt_bboxes_ignore_list (list[Tensor], optional): Bounding boxes which can be ignored for each image. Default None. Returns: dict[str, Tensor]: A dictionary of loss components for outputs from a single decoder layer. 
""" num_imgs = cls_scores.size(0) cls_scores_list = [cls_scores[i] for i in range(num_imgs)] bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets labels = torch.cat(labels_list, 0) label_weights = torch.cat(label_weights_list, 0) bbox_targets = torch.cat(bbox_targets_list, 0) bbox_weights = torch.cat(bbox_weights_list, 0) # classification loss cls_scores = cls_scores.reshape(-1, self.cls_out_channels) # construct weighted avg_factor to match with the official DETR repo cls_avg_factor = num_total_pos * 1.0 + \ num_total_neg * self.bg_cls_weight if self.sync_cls_avg_factor: cls_avg_factor = reduce_mean( cls_scores.new_tensor([cls_avg_factor])) cls_avg_factor = max(cls_avg_factor, 1) loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=cls_avg_factor) # Compute the average number of gt boxes across all gpus, for # normalization purposes num_total_pos = loss_cls.new_tensor([num_total_pos]) num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() # construct factors used for rescale bboxes factors = [] for img_meta, bbox_pred in zip(img_metas, bbox_preds): img_h, img_w, _ = img_meta['img_shape'] factor = bbox_pred.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0).repeat( bbox_pred.size(0), 1) factors.append(factor) factors = torch.cat(factors, 0) # DETR regress the relative position of boxes (cxcywh) in the image, # thus the learning target is normalized by the image size. So here # we need to re-scale them for calculating IoU loss bbox_preds = bbox_preds.reshape(-1, 4) bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors # regression IoU loss, defaultly GIoU loss loss_iou = self.loss_iou( bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) # regression L1 loss loss_bbox = self.loss_bbox( bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) return loss_cls, loss_bbox, loss_iou def get_targets(self, cls_scores_list, bbox_preds_list, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list=None): """"Compute regression and classification targets for a batch image. Outputs from a single decoder layer of a single feature level are used. Args: cls_scores_list (list[Tensor]): Box score logits from a single decoder layer for each image with shape [num_query, cls_out_channels]. bbox_preds_list (list[Tensor]): Sigmoid outputs from a single decoder layer for each image, with normalized coordinate (cx, cy, w, h) and shape [num_query, 4]. gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels_list (list[Tensor]): Ground truth class indices for each image with shape (num_gts, ). img_metas (list[dict]): List of image meta information. gt_bboxes_ignore_list (list[Tensor], optional): Bounding boxes which can be ignored for each image. Default None. Returns: tuple: a tuple containing the following targets. - labels_list (list[Tensor]): Labels for all images. - label_weights_list (list[Tensor]): Label weights for all \ images. - bbox_targets_list (list[Tensor]): BBox targets for all \ images. - bbox_weights_list (list[Tensor]): BBox weights for all \ images. - num_total_pos (int): Number of positive samples in all \ images. 
- num_total_neg (int): Number of negative samples in all \ images. """ assert gt_bboxes_ignore_list is None, \ 'Only supports for gt_bboxes_ignore setting to None.' num_imgs = len(cls_scores_list) gt_bboxes_ignore_list = [ gt_bboxes_ignore_list for _ in range(num_imgs) ] (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, cls_scores_list, bbox_preds_list, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list) num_total_pos = sum((inds.numel() for inds in pos_inds_list)) num_total_neg = sum((inds.numel() for inds in neg_inds_list)) return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, cls_score, bbox_pred, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore=None): """"Compute regression and classification targets for one image. Outputs from a single decoder layer of a single feature level are used. Args: cls_score (Tensor): Box score logits from a single decoder layer for one image. Shape [num_query, cls_out_channels]. bbox_pred (Tensor): Sigmoid outputs from a single decoder layer for one image, with normalized coordinate (cx, cy, w, h) and shape [num_query, 4]. gt_bboxes (Tensor): Ground truth bboxes for one image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (Tensor): Ground truth class indices for one image with shape (num_gts, ). img_meta (dict): Meta information for one image. gt_bboxes_ignore (Tensor, optional): Bounding boxes which can be ignored. Default None. Returns: tuple[Tensor]: a tuple containing the following for one image. - labels (Tensor): Labels of each image. - label_weights (Tensor]): Label weights of each image. - bbox_targets (Tensor): BBox targets of each image. - bbox_weights (Tensor): BBox weights of each image. - pos_inds (Tensor): Sampled positive indices for each image. - neg_inds (Tensor): Sampled negative indices for each image. """ num_bboxes = bbox_pred.size(0) # assigner and sampler assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore) sampling_result = self.sampler.sample(assign_result, bbox_pred, gt_bboxes) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds # label targets labels = gt_bboxes.new_full((num_bboxes, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] label_weights = gt_bboxes.new_ones(num_bboxes) # bbox targets bbox_targets = torch.zeros_like(bbox_pred) bbox_weights = torch.zeros_like(bbox_pred) bbox_weights[pos_inds] = 1.0 img_h, img_w, _ = img_meta['img_shape'] # DETR regress the relative position of boxes (cxcywh) in the image. # Thus the learning target should be normalized by the image size, also # the box format should be converted from defaultly x1y1x2y2 to cxcywh. factor = bbox_pred.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized) bbox_targets[pos_inds] = pos_gt_bboxes_targets return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) # over-write because img_metas are needed as inputs for bbox_head. def forward_train(self, x, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=None, proposal_cfg=None, **kwargs): """Forward function for training mode. Args: x (list[Tensor]): Features from backbone. 
img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). proposal_cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert proposal_cfg is None, '"proposal_cfg" must be None' outs = self(x, img_metas) if gt_labels is None: loss_inputs = outs + (gt_bboxes, img_metas) else: loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) return losses @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) def get_bboxes(self, all_cls_scores_list, all_bbox_preds_list, img_metas, rescale=False): """Transform network outputs for a batch into bbox predictions. Args: all_cls_scores_list (list[Tensor]): Classification outputs for each feature level. Each is a 4D-tensor with shape [nb_dec, bs, num_query, cls_out_channels]. all_bbox_preds_list (list[Tensor]): Sigmoid regression outputs for each feature level. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. img_metas (list[dict]): Meta information of each image. rescale (bool, optional): If True, return boxes in original image space. Default False. Returns: list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ The first item is an (n, 5) tensor, where the first 4 columns \ are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ 5-th column is a score between 0 and 1. The second item is a \ (n,) tensor where each item is the predicted class label of \ the corresponding box. """ # NOTE defaultly only using outputs from the last feature level, # and only the outputs from the last decoder layer is used. cls_scores = all_cls_scores_list[-1][-1] bbox_preds = all_bbox_preds_list[-1][-1] result_list = [] for img_id in range(len(img_metas)): cls_score = cls_scores[img_id] bbox_pred = bbox_preds[img_id] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single(cls_score, bbox_pred, img_shape, scale_factor, rescale) result_list.append(proposals) return result_list def _get_bboxes_single(self, cls_score, bbox_pred, img_shape, scale_factor, rescale=False): """Transform outputs from the last decoder layer into bbox predictions for each image. Args: cls_score (Tensor): Box score logits from the last decoder layer for each image. Shape [num_query, cls_out_channels]. bbox_pred (Tensor): Sigmoid outputs from the last decoder layer for each image, with coordinate format (cx, cy, w, h) and shape [num_query, 4]. img_shape (tuple[int]): Shape of input image, (height, width, 3). scale_factor (ndarray, optional): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). rescale (bool, optional): If True, return boxes in original image space. Default False. Returns: tuple[Tensor]: Results of detected bboxes and labels. - det_bboxes: Predicted bboxes with shape [num_query, 5], \ where the first 4 columns are bounding box positions \ (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \ between 0 and 1. - det_labels: Predicted labels of the corresponding box with \ shape [num_query]. 
""" assert len(cls_score) == len(bbox_pred) max_per_img = self.test_cfg.get('max_per_img', self.num_query) # exclude background if self.loss_cls.use_sigmoid: cls_score = cls_score.sigmoid() scores, indexes = cls_score.view(-1).topk(max_per_img) det_labels = indexes % self.num_classes bbox_index = indexes // self.num_classes bbox_pred = bbox_pred[bbox_index] else: scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) scores, bbox_index = scores.topk(max_per_img) bbox_pred = bbox_pred[bbox_index] det_labels = det_labels[bbox_index] det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) if rescale: det_bboxes /= det_bboxes.new_tensor(scale_factor) det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1) return det_bboxes, det_labels def simple_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,) """ # forward of this head requires img_metas outs = self.forward(feats, img_metas) results_list = self.get_bboxes(*outs, img_metas, rescale=rescale) return results_list def forward_onnx(self, feats, img_metas): """Forward function for exporting to ONNX. Over-write `forward` because: `masks` is directly created with zero (valid position tag) and has the same spatial size as `x`. Thus the construction of `masks` is different from that in `forward`. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. - all_cls_scores_list (list[Tensor]): Classification scores \ for each scale level. Each is a 4D-tensor with shape \ [nb_dec, bs, num_query, cls_out_channels]. Note \ `cls_out_channels` should includes background. - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ outputs for each scale level. Each is a 4D-tensor with \ normalized coordinate format (cx, cy, w, h) and shape \ [nb_dec, bs, num_query, 4]. """ num_levels = len(feats) img_metas_list = [img_metas for _ in range(num_levels)] return multi_apply(self.forward_single_onnx, feats, img_metas_list) def forward_single_onnx(self, x, img_metas): """"Forward function for a single feature level with ONNX exportation. Args: x (Tensor): Input feature from backbone's single stage, shape [bs, c, h, w]. img_metas (list[dict]): List of image information. Returns: all_cls_scores (Tensor): Outputs from the classification head, shape [nb_dec, bs, num_query, cls_out_channels]. Note cls_out_channels should includes background. all_bbox_preds (Tensor): Sigmoid outputs from the regression head with normalized coordinate format (cx, cy, w, h). Shape [nb_dec, bs, num_query, 4]. """ # Note `img_shape` is not dynamically traceable to ONNX, # since the related augmentation was done with numpy under # CPU. 
Thus `masks` is directly created with zeros (valid tag) # and the same spatial shape as `x`. # The difference between torch and exported ONNX model may be # ignored, since the same performance is achieved (e.g. # 40.1 vs 40.1 for DETR) batch_size = x.size(0) h, w = x.size()[-2:] masks = x.new_zeros((batch_size, h, w)) # [B,h,w] x = self.input_proj(x) # interpolate masks to have the same spatial shape with x masks = F.interpolate( masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) pos_embed = self.positional_encoding(masks) outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, pos_embed) all_cls_scores = self.fc_cls(outs_dec) all_bbox_preds = self.fc_reg(self.activate( self.reg_ffn(outs_dec))).sigmoid() return all_cls_scores, all_bbox_preds def onnx_export(self, all_cls_scores_list, all_bbox_preds_list, img_metas): """Transform network outputs into bbox predictions, with ONNX exportation. Args: all_cls_scores_list (list[Tensor]): Classification outputs for each feature level. Each is a 4D-tensor with shape [nb_dec, bs, num_query, cls_out_channels]. all_bbox_preds_list (list[Tensor]): Sigmoid regression outputs for each feature level. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. img_metas (list[dict]): Meta information of each image. Returns: tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class labels of shape [N, num_det]. """ assert len(img_metas) == 1, \ 'Only support one input image while in exporting to ONNX' cls_scores = all_cls_scores_list[-1][-1] bbox_preds = all_bbox_preds_list[-1][-1] # Note `img_shape` is not dynamically traceable to ONNX, # here `img_shape_for_onnx` (padded shape of image tensor) # is used. img_shape = img_metas[0]['img_shape_for_onnx'] max_per_img = self.test_cfg.get('max_per_img', self.num_query) batch_size = cls_scores.size(0) # `batch_index_offset` is used for the gather of concatenated tensor batch_index_offset = torch.arange(batch_size).to( cls_scores.device) * max_per_img batch_index_offset = batch_index_offset.unsqueeze(1).expand( batch_size, max_per_img) # supports dynamical batch inference if self.loss_cls.use_sigmoid: cls_scores = cls_scores.sigmoid() scores, indexes = cls_scores.view(batch_size, -1).topk( max_per_img, dim=1) det_labels = indexes % self.num_classes bbox_index = indexes // self.num_classes bbox_index = (bbox_index + batch_index_offset).view(-1) bbox_preds = bbox_preds.view(-1, 4)[bbox_index] bbox_preds = bbox_preds.view(batch_size, -1, 4) else: scores, det_labels = F.softmax( cls_scores, dim=-1)[..., :-1].max(-1) scores, bbox_index = scores.topk(max_per_img, dim=1) bbox_index = (bbox_index + batch_index_offset).view(-1) bbox_preds = bbox_preds.view(-1, 4)[bbox_index] det_labels = det_labels.view(-1)[bbox_index] bbox_preds = bbox_preds.view(batch_size, -1, 4) det_labels = det_labels.view(batch_size, -1) det_bboxes = bbox_cxcywh_to_xyxy(bbox_preds) # use `img_shape_tensor` for dynamically exporting to ONNX img_shape_tensor = img_shape.flip(0).repeat(2) # [w,h,w,h] img_shape_tensor = img_shape_tensor.unsqueeze(0).unsqueeze(0).expand( batch_size, det_bboxes.size(1), 4) det_bboxes = det_bboxes * img_shape_tensor # dynamically clip bboxes x1, y1, x2, y2 = det_bboxes.split((1, 1, 1, 1), dim=-1) from mmdet.core.export import dynamic_clip_for_onnx x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, img_shape) det_bboxes = torch.cat([x1, y1, x2, y2], dim=-1) det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(-1)), -1) return det_bboxes, 
det_labels
39,707
45.991716
79
py
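As a quick illustration of the box handling in the DETR-style head above: `loss_single` rescales the normalized (cx, cy, w, h) predictions to absolute (x1, y1, x2, y2) boxes with a per-image factor before computing the IoU loss. The sketch below mirrors that step in plain PyTorch; the image size and the number of queries are made-up values, and the local `cxcywh_to_xyxy` helper stands in for mmdet's `bbox_cxcywh_to_xyxy`.

import torch


def cxcywh_to_xyxy(boxes):
    """Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2)."""
    cx, cy, w, h = boxes.unbind(-1)
    return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)


img_h, img_w = 480, 640            # hypothetical padded input image size
bbox_pred = torch.rand(100, 4)     # normalized (cx, cy, w, h), one row per query
factor = bbox_pred.new_tensor([img_w, img_h, img_w, img_h])
bboxes = cxcywh_to_xyxy(bbox_pred) * factor   # absolute boxes fed to the IoU loss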
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/rpn_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.ops import batched_nms from ..builder import HEADS from .anchor_head import AnchorHead @HEADS.register_module() class RPNHead(AnchorHead): """RPN head. Args: in_channels (int): Number of channels in the input feature map. init_cfg (dict or list[dict], optional): Initialization config dict. num_convs (int): Number of convolution layers in the head. Default 1. """ # noqa: W605 def __init__(self, in_channels, init_cfg=dict(type='Normal', layer='Conv2d', std=0.01), num_convs=1, **kwargs): self.num_convs = num_convs super(RPNHead, self).__init__( 1, in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" if self.num_convs > 1: rpn_convs = [] for i in range(self.num_convs): if i == 0: in_channels = self.in_channels else: in_channels = self.feat_channels # use ``inplace=False`` to avoid error: one of the variables # needed for gradient computation has been modified by an # inplace operation. rpn_convs.append( ConvModule( in_channels, self.feat_channels, 3, padding=1, inplace=False)) self.rpn_conv = nn.Sequential(*rpn_convs) else: self.rpn_conv = nn.Conv2d( self.in_channels, self.feat_channels, 3, padding=1) self.rpn_cls = nn.Conv2d(self.feat_channels, self.num_base_priors * self.cls_out_channels, 1) self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_base_priors * 4, 1) def forward_single(self, x): """Forward feature map of a single scale level.""" x = self.rpn_conv(x) x = F.relu(x, inplace=True) rpn_cls_score = self.rpn_cls(x) rpn_bbox_pred = self.rpn_reg(x) return rpn_cls_score, rpn_bbox_pred def loss(self, cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ losses = super(RPNHead, self).loss( cls_scores, bbox_preds, gt_bboxes, None, img_metas, gt_bboxes_ignore=gt_bboxes_ignore) return dict( loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox']) def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_anchors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_anchors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_anchors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. RPN head does not need this value. mlvl_anchors (list[Tensor]): Anchors of all scale level each item has shape (num_anchors, 4). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. 
rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: Tensor: Labeled boxes in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. """ cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] # bboxes from different level should be independent during NMS, # level_ids are used as labels for batched NMS to separate them level_ids = [] mlvl_scores = [] mlvl_bbox_preds = [] mlvl_valid_anchors = [] nms_pre = cfg.get('nms_pre', -1) for level_idx in range(len(cls_score_list)): rpn_cls_score = cls_score_list[level_idx] rpn_bbox_pred = bbox_pred_list[level_idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) # We set FG labels to [0, num_class-1] and BG label to # num_class in RPN head since mmdet v2.5, which is unified to # be consistent with other head since mmdet v2.0. In mmdet v2.0 # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. scores = rpn_cls_score.softmax(dim=1)[:, 0] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) anchors = mlvl_anchors[level_idx] if 0 < nms_pre < scores.shape[0]: # sort is faster than topk # _, topk_inds = scores.topk(cfg.nms_pre) ranked_scores, rank_inds = scores.sort(descending=True) topk_inds = rank_inds[:nms_pre] scores = ranked_scores[:nms_pre] rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] mlvl_scores.append(scores) mlvl_bbox_preds.append(rpn_bbox_pred) mlvl_valid_anchors.append(anchors) level_ids.append( scores.new_full((scores.size(0), ), level_idx, dtype=torch.long)) return self._bbox_post_process(mlvl_scores, mlvl_bbox_preds, mlvl_valid_anchors, level_ids, cfg, img_shape) def _bbox_post_process(self, mlvl_scores, mlvl_bboxes, mlvl_valid_anchors, level_ids, cfg, img_shape, **kwargs): """bbox post-processing method. Do the nms operation for bboxes in same level. Args: mlvl_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_bboxes, ). mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale levels of a single image, each item has shape (num_bboxes, 4). mlvl_valid_anchors (list[Tensor]): Anchors of all scale level each item has shape (num_bboxes, 4). level_ids (list[Tensor]): Indexes from all scale levels of a single image, each item has shape (num_bboxes, ). cfg (mmcv.Config): Test / postprocessing configuration, if None, `self.test_cfg` would be used. img_shape (tuple(int)): The shape of model's input image. Returns: Tensor: Labeled boxes in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. 
""" scores = torch.cat(mlvl_scores) anchors = torch.cat(mlvl_valid_anchors) rpn_bbox_pred = torch.cat(mlvl_bboxes) proposals = self.bbox_coder.decode( anchors, rpn_bbox_pred, max_shape=img_shape) ids = torch.cat(level_ids) if cfg.min_bbox_size >= 0: w = proposals[:, 2] - proposals[:, 0] h = proposals[:, 3] - proposals[:, 1] valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) if not valid_mask.all(): proposals = proposals[valid_mask] scores = scores[valid_mask] ids = ids[valid_mask] if proposals.numel() > 0: dets, _ = batched_nms(proposals, scores, ids, cfg.nms) else: return proposals.new_zeros(0, 5) return dets[:cfg.max_per_img] def onnx_export(self, x, img_metas): """Test without augmentation. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: Tensor: dets of shape [N, num_det, 5]. """ cls_scores, bbox_preds = self(x) assert len(cls_scores) == len(bbox_preds) batch_bboxes, batch_scores = super(RPNHead, self).onnx_export( cls_scores, bbox_preds, img_metas=img_metas, with_nms=False) # Use ONNX::NonMaxSuppression in deployment from mmdet.core.export import add_dummy_nms_for_onnx cfg = copy.deepcopy(self.test_cfg) score_threshold = cfg.nms.get('score_thr', 0.0) nms_pre = cfg.get('deploy_nms_pre', -1) # Different from the normal forward doing NMS level by level, # we do NMS across all levels when exporting ONNX. dets, _ = add_dummy_nms_for_onnx(batch_bboxes, batch_scores, cfg.max_per_img, cfg.nms.iou_threshold, score_threshold, nms_pre, cfg.max_per_img) return dets
11,201
41.112782
79
py
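A minimal usage sketch for the RPNHead defined in rpn_head.py above, assuming mmdet and mmcv are installed; the channel size and feature-map sizes are illustrative only.

import torch
from mmdet.models.dense_heads import RPNHead

# build an RPN head with the default anchor generator and bbox coder
head = RPNHead(in_channels=256)
# dummy multi-level features, e.g. from an FPN neck
feats = [torch.rand(1, 256, s, s) for s in (64, 32, 16, 8, 4)]
cls_scores, bbox_preds = head(feats)
# per level: cls_scores[i] has shape (1, num_base_priors * 1, H, W) and
# bbox_preds[i] has shape (1, num_base_priors * 4, H, W)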
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/anchor_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, build_assigner, build_bbox_coder, build_prior_generator, build_sampler, images_to_levels, multi_apply, unmap) from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class AnchorHead(BaseDenseHead, BBoxTestMixin): """Anchor-based head (RPN, RetinaNet, SSD, etc.). Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. anchor_generator (dict): Config dict for anchor generator bbox_coder (dict): Config of bounding box coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. init_cfg (dict or list[dict], optional): Initialization config dict. """ # noqa: W605 def __init__(self, num_classes, in_channels, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=True, target_means=(.0, .0, .0, .0), target_stds=(1.0, 1.0, 1.0, 1.0)), reg_decoded_bbox=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), train_cfg=None, test_cfg=None, init_cfg=dict(type='Normal', layer='Conv2d', std=0.01)): super(AnchorHead, self).__init__(init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 if self.cls_out_channels <= 0: raise ValueError(f'num_classes={num_classes} is too small') self.reg_decoded_bbox = reg_decoded_bbox self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) if hasattr(self.train_cfg, 'sampler') and self.train_cfg.sampler.type.split( '.')[-1] != 'PseudoSampler': self.sampling = True sampler_cfg = self.train_cfg.sampler # avoid BC-breaking if loss_cls['type'] in [ 'FocalLoss', 'GHMC', 'QualityFocalLoss' ]: warnings.warn( 'DeprecationWarning: Determining whether to sampling' 'by loss type is deprecated, please delete sampler in' 'your config when using `FocalLoss`, `GHMC`, ' '`QualityFocalLoss` or other FocalLoss variant.') self.sampling = False sampler_cfg = dict(type='PseudoSampler') else: self.sampling = False sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False self.prior_generator = build_prior_generator(anchor_generator) # Usually the numbers of anchors for each level are the same # except SSD detectors. 
So it is an int in the most dense # heads but a list of int in SSDHead self.num_base_priors = self.prior_generator.num_base_priors[0] self._init_layers() @property def num_anchors(self): warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'for consistency or also use ' '`num_base_priors` instead') return self.prior_generator.num_base_priors[0] @property def anchor_generator(self): warnings.warn('DeprecationWarning: anchor_generator is deprecated, ' 'please use "prior_generator" instead') return self.prior_generator def _init_layers(self): """Initialize layers of the head.""" self.conv_cls = nn.Conv2d(self.in_channels, self.num_base_priors * self.cls_out_channels, 1) self.conv_reg = nn.Conv2d(self.in_channels, self.num_base_priors * 4, 1) def forward_single(self, x): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level \ the channels number is num_base_priors * num_classes. bbox_pred (Tensor): Box energies / deltas for a single scale \ level, the channels number is num_base_priors * 4. """ cls_score = self.conv_cls(x) bbox_pred = self.conv_reg(x) return cls_score, bbox_pred def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and bbox prediction. - cls_scores (list[Tensor]): Classification scores for all \ scale levels, each is a 4D-tensor, the channels number \ is num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all \ scale levels, each is a 4D-tensor, the channels number \ is num_base_priors * 4. """ return multi_apply(self.forward_single, feats) def get_anchors(self, featmap_sizes, img_metas, device='cuda'): """Get anchors according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): Device for returned tensors Returns: tuple: anchor_list (list[Tensor]): Anchors of each image. valid_flag_list (list[Tensor]): Valid flags of each image. """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # anchors for one time multi_level_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) anchor_list = [multi_level_anchors for _ in range(num_imgs)] # for each image, we compute valid flags of multi level anchors valid_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = self.prior_generator.valid_flags( featmap_sizes, img_meta['pad_shape'], device) valid_flag_list.append(multi_level_flags) return anchor_list, valid_flag_list def _get_targets_single(self, flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). img_meta (dict): Meta info of the image. gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). 
label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: labels_list (list[Tensor]): Labels of each level label_weights_list (list[Tensor]): Label weights of each level bbox_targets_list (list[Tensor]): BBox targets of each level bbox_weights_list (list[Tensor]): BBox weights of each level num_total_pos (int): Number of positive samples in all images num_total_neg (int): Number of negative samples in all images """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] assign_result = self.assigner.assign( anchors, gt_bboxes, gt_bboxes_ignore, None if self.sampling else gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) else: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class since v2.5.0 labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) # fill bg label label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def get_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True, return_sampling_results=False): """Compute regression and classification targets for anchors in multiple images. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_labels_list (list[Tensor]): Ground truth labels of each box. label_channels (int): Channel of label. 
unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: Usually returns a tuple containing learning targets. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. - bbox_targets_list (list[Tensor]): BBox targets of each level. - bbox_weights_list (list[Tensor]): BBox weights of each level. - num_total_pos (int): Number of positive samples in all images. - num_total_neg (int): Number of negative samples in all images. additional_returns: This function enables user-defined returns from `self._get_targets_single`. These returns are currently refined to properties at each feature map (i.e. having HxW dimension). The results will be concatenated after the end """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors to a single tensor concat_anchor_list = [] concat_valid_flag_list = [] for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) concat_anchor_list.append(torch.cat(anchor_list[i])) concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] results = multi_apply( self._get_targets_single, concat_anchor_list, concat_valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = results[:7] rest_results = list(results[7:]) # user-added return values # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) res = (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) if return_sampling_results: res = res + (sampling_results_list, ) for i, r in enumerate(rest_results): # user-added return values rest_results[i] = images_to_levels(r, num_level_anchors) return res + tuple(rest_results) def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, bbox_targets, bbox_weights, num_total_samples): """Compute loss of a single scale level. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). 
bbox_weights (Tensor): BBox regression loss weights of each anchor with shape (N, num_total_anchors, 4). num_total_samples (int): If sampling, num total samples equal to the number of total anchors; Otherwise, it is the number of positive anchors. Returns: dict[str, Tensor]: A dictionary of loss components. """ # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) # regression loss bbox_targets = bbox_targets.reshape(-1, 4) bbox_weights = bbox_weights.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. anchors = anchors.reshape(-1, 4) bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) loss_bbox = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples) return loss_cls, loss_bbox @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Default: None Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) def aug_test(self, feats, img_metas, rescale=False): """Test function with test time augmentation. 
Args: feats (list[Tensor]): The outer list indicates test-time augmentations and the inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): The outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. Each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represents (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,). The length of the list should always be 1. """ return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
24,605
44.314917
79
py
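To make the anchor generation step in anchor_head.py above concrete, here is a small sketch of the prior generator it builds (assumes mmdet is installed; the feature-map sizes are invented).

from mmdet.core import build_prior_generator

# same default anchor generator config as AnchorHead above
prior_generator = build_prior_generator(
    dict(
        type='AnchorGenerator',
        scales=[8, 16, 32],
        ratios=[0.5, 1.0, 2.0],
        strides=[4, 8, 16, 32, 64]))
# hypothetical per-level feature-map sizes (H, W)
featmap_sizes = [(200, 304), (100, 152), (50, 76), (25, 38), (13, 19)]
# one (H * W * num_base_priors, 4) tensor of xyxy anchors per level;
# num_base_priors = len(scales) * len(ratios) = 9 here
multi_level_anchors = prior_generator.grid_priors(featmap_sizes, device='cpu')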
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/yolox_head.py
# Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, bias_init_with_prob) from mmcv.ops.nms import batched_nms from mmcv.runner import force_fp32 from mmdet.core import (MlvlPointGenerator, bbox_xyxy_to_cxcywh, build_assigner, build_sampler, multi_apply, reduce_mean) from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class YOLOXHead(BaseDenseHead, BBoxTestMixin): """YOLOXHead head used in `YOLOX <https://arxiv.org/abs/2107.08430>`_. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels in stacking convs. Default: 256 stacked_convs (int): Number of stacking convs of the head. Default: 2. strides (tuple): Downsample factor of each feature map. use_depthwise (bool): Whether to depthwise separable convolution in blocks. Default: False dcn_on_last_conv (bool): If true, use dcn in the last layer of towers. Default: False. conv_bias (bool | str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Default: "auto". conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. act_cfg (dict): Config dict for activation layer. Default: None. loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. loss_obj (dict): Config of objectness loss. loss_l1 (dict): Config of L1 loss. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, num_classes, in_channels, feat_channels=256, stacked_convs=2, strides=[8, 16, 32], use_depthwise=False, dcn_on_last_conv=False, conv_bias='auto', conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, reduction='sum', loss_weight=1.0), loss_bbox=dict( type='IoULoss', mode='square', eps=1e-16, reduction='sum', loss_weight=5.0), loss_obj=dict( type='CrossEntropyLoss', use_sigmoid=True, reduction='sum', loss_weight=1.0), loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0), train_cfg=None, test_cfg=None, init_cfg=dict( type='Kaiming', layer='Conv2d', a=math.sqrt(5), distribution='uniform', mode='fan_in', nonlinearity='leaky_relu')): super().__init__(init_cfg=init_cfg) self.num_classes = num_classes self.cls_out_channels = num_classes self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.use_depthwise = use_depthwise self.dcn_on_last_conv = dcn_on_last_conv assert conv_bias == 'auto' or isinstance(conv_bias, bool) self.conv_bias = conv_bias self.use_sigmoid_cls = True self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.loss_obj = build_loss(loss_obj) self.use_l1 = False # This flag will be modified by hooks. 
self.loss_l1 = build_loss(loss_l1) self.prior_generator = MlvlPointGenerator(strides, offset=0) self.test_cfg = test_cfg self.train_cfg = train_cfg self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False self._init_layers() def _init_layers(self): self.multi_level_cls_convs = nn.ModuleList() self.multi_level_reg_convs = nn.ModuleList() self.multi_level_conv_cls = nn.ModuleList() self.multi_level_conv_reg = nn.ModuleList() self.multi_level_conv_obj = nn.ModuleList() for _ in self.strides: self.multi_level_cls_convs.append(self._build_stacked_convs()) self.multi_level_reg_convs.append(self._build_stacked_convs()) conv_cls, conv_reg, conv_obj = self._build_predictor() self.multi_level_conv_cls.append(conv_cls) self.multi_level_conv_reg.append(conv_reg) self.multi_level_conv_obj.append(conv_obj) def _build_stacked_convs(self): """Initialize conv layers of a single level head.""" conv = DepthwiseSeparableConvModule \ if self.use_depthwise else ConvModule stacked_convs = [] for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg stacked_convs.append( conv( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, bias=self.conv_bias)) return nn.Sequential(*stacked_convs) def _build_predictor(self): """Initialize predictor layers of a single level head.""" conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1) conv_reg = nn.Conv2d(self.feat_channels, 4, 1) conv_obj = nn.Conv2d(self.feat_channels, 1, 1) return conv_cls, conv_reg, conv_obj def init_weights(self): super(YOLOXHead, self).init_weights() # Use prior in model initialization to improve stability bias_init = bias_init_with_prob(0.01) for conv_cls, conv_obj in zip(self.multi_level_conv_cls, self.multi_level_conv_obj): conv_cls.bias.data.fill_(bias_init) conv_obj.bias.data.fill_(bias_init) def forward_single(self, x, cls_convs, reg_convs, conv_cls, conv_reg, conv_obj): """Forward feature of a single scale level.""" cls_feat = cls_convs(x) reg_feat = reg_convs(x) cls_score = conv_cls(cls_feat) bbox_pred = conv_reg(reg_feat) objectness = conv_obj(reg_feat) return cls_score, bbox_pred, objectness def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple[Tensor]: A tuple of multi-level predication map, each is a 4D-tensor of shape (batch_size, 5+num_classes, height, width). """ return multi_apply(self.forward_single, feats, self.multi_level_cls_convs, self.multi_level_reg_convs, self.multi_level_conv_cls, self.multi_level_conv_reg, self.multi_level_conv_obj) def get_bboxes(self, cls_scores, bbox_preds, objectnesses, img_metas=None, cfg=None, rescale=False, with_nms=True): """Transform network outputs of a batch into bbox results. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). 
objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). img_metas (list[dict], Optional): Image meta info. Default None. cfg (mmcv.Config, Optional): Test / postprocessing configuration, if None, test_cfg would be used. Default None. rescale (bool): If True, return boxes in original image space. Default False. with_nms (bool): If True, do nms before return boxes. Default True. Returns: list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is a (n,) tensor where each item is the predicted class label of the corresponding box. """ assert len(cls_scores) == len(bbox_preds) == len(objectnesses) cfg = self.test_cfg if cfg is None else cfg scale_factors = [img_meta['scale_factor'] for img_meta in img_metas] num_imgs = len(img_metas) featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device, with_stride=True) # flatten cls_scores, bbox_preds and objectness flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_objectness = [ objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) for objectness in objectnesses ] flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() flatten_priors = torch.cat(mlvl_priors) flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) if rescale: flatten_bboxes[..., :4] /= flatten_bboxes.new_tensor( scale_factors).unsqueeze(1) result_list = [] for img_id in range(len(img_metas)): cls_scores = flatten_cls_scores[img_id] score_factor = flatten_objectness[img_id] bboxes = flatten_bboxes[img_id] result_list.append( self._bboxes_nms(cls_scores, bboxes, score_factor, cfg)) return result_list def _bbox_decode(self, priors, bbox_preds): xys = (bbox_preds[..., :2] * priors[:, 2:]) + priors[:, :2] whs = bbox_preds[..., 2:].exp() * priors[:, 2:] tl_x = (xys[..., 0] - whs[..., 0] / 2) tl_y = (xys[..., 1] - whs[..., 1] / 2) br_x = (xys[..., 0] + whs[..., 0] / 2) br_y = (xys[..., 1] + whs[..., 1] / 2) decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1) return decoded_bboxes def _bboxes_nms(self, cls_scores, bboxes, score_factor, cfg): max_scores, labels = torch.max(cls_scores, 1) valid_mask = score_factor * max_scores >= cfg.score_thr bboxes = bboxes[valid_mask] scores = max_scores[valid_mask] * score_factor[valid_mask] labels = labels[valid_mask] if labels.numel() == 0: return bboxes, labels else: dets, keep = batched_nms(bboxes, scores, labels, cfg.nms) return dets, labels[keep] @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) def loss(self, cls_scores, bbox_preds, objectnesses, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_priors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_priors * 4. 
objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. """ num_imgs = len(img_metas) featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device, with_stride=True) flatten_cls_preds = [ cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_pred in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_objectness = [ objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) for objectness in objectnesses ] flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) flatten_objectness = torch.cat(flatten_objectness, dim=1) flatten_priors = torch.cat(mlvl_priors) flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) (pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets, num_fg_imgs) = multi_apply( self._get_target_single, flatten_cls_preds.detach(), flatten_objectness.detach(), flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1), flatten_bboxes.detach(), gt_bboxes, gt_labels) # The experimental results show that ‘reduce_mean’ can improve # performance on the COCO dataset. num_pos = torch.tensor( sum(num_fg_imgs), dtype=torch.float, device=flatten_cls_preds.device) num_total_samples = max(reduce_mean(num_pos), 1.0) pos_masks = torch.cat(pos_masks, 0) cls_targets = torch.cat(cls_targets, 0) obj_targets = torch.cat(obj_targets, 0) bbox_targets = torch.cat(bbox_targets, 0) if self.use_l1: l1_targets = torch.cat(l1_targets, 0) loss_bbox = self.loss_bbox( flatten_bboxes.view(-1, 4)[pos_masks], bbox_targets) / num_total_samples loss_obj = self.loss_obj(flatten_objectness.view(-1, 1), obj_targets) / num_total_samples loss_cls = self.loss_cls( flatten_cls_preds.view(-1, self.num_classes)[pos_masks], cls_targets) / num_total_samples loss_dict = dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj) if self.use_l1: loss_l1 = self.loss_l1( flatten_bbox_preds.view(-1, 4)[pos_masks], l1_targets) / num_total_samples loss_dict.update(loss_l1=loss_l1) return loss_dict @torch.no_grad() def _get_target_single(self, cls_preds, objectness, priors, decoded_bboxes, gt_bboxes, gt_labels): """Compute classification, regression, and objectness targets for priors in a single image. Args: cls_preds (Tensor): Classification predictions of one image, a 2D-Tensor with shape [num_priors, num_classes] objectness (Tensor): Objectness predictions of one image, a 1D-Tensor with shape [num_priors] priors (Tensor): All priors of one image, a 2D-Tensor with shape [num_priors, 4] in [cx, xy, stride_w, stride_y] format. decoded_bboxes (Tensor): Decoded bboxes predictions of one image, a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. gt_labels (Tensor): Ground truth labels of one image, a Tensor with shape [num_gts]. 
""" num_priors = priors.size(0) num_gts = gt_labels.size(0) gt_bboxes = gt_bboxes.to(decoded_bboxes.dtype) # No target if num_gts == 0: cls_target = cls_preds.new_zeros((0, self.num_classes)) bbox_target = cls_preds.new_zeros((0, 4)) l1_target = cls_preds.new_zeros((0, 4)) obj_target = cls_preds.new_zeros((num_priors, 1)) foreground_mask = cls_preds.new_zeros(num_priors).bool() return (foreground_mask, cls_target, obj_target, bbox_target, l1_target, 0) # YOLOX uses center priors with 0.5 offset to assign targets, # but use center priors without offset to regress bboxes. offset_priors = torch.cat( [priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1) assign_result = self.assigner.assign( cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid(), offset_priors, decoded_bboxes, gt_bboxes, gt_labels) sampling_result = self.sampler.sample(assign_result, priors, gt_bboxes) pos_inds = sampling_result.pos_inds num_pos_per_img = pos_inds.size(0) pos_ious = assign_result.max_overlaps[pos_inds] # IOU aware classification score cls_target = F.one_hot(sampling_result.pos_gt_labels, self.num_classes) * pos_ious.unsqueeze(-1) obj_target = torch.zeros_like(objectness).unsqueeze(-1) obj_target[pos_inds] = 1 bbox_target = sampling_result.pos_gt_bboxes l1_target = cls_preds.new_zeros((num_pos_per_img, 4)) if self.use_l1: l1_target = self._get_l1_target(l1_target, bbox_target, priors[pos_inds]) foreground_mask = torch.zeros_like(objectness).to(torch.bool) foreground_mask[pos_inds] = 1 return (foreground_mask, cls_target, obj_target, bbox_target, l1_target, num_pos_per_img) def _get_l1_target(self, l1_target, gt_bboxes, priors, eps=1e-8): """Convert gt bboxes to center offset and log width height.""" gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes) l1_target[:, :2] = (gt_cxcywh[:, :2] - priors[:, :2]) / priors[:, 2:] l1_target[:, 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps) return l1_target
20,874
41.515275
79
py
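A minimal standalone sketch of the prior-based box decoding performed by `_bbox_decode` in the YOLOX-style head above (priors assumed to be in [cx, cy, stride_w, stride_h] format, as its docstring describes; the input values are illustrative only):

import torch

def decode_boxes(priors, bbox_preds):
    # centers: predicted offsets scaled by stride, added to prior centers
    xys = bbox_preds[..., :2] * priors[:, 2:] + priors[:, :2]
    # sizes: exponentiated predictions scaled by stride
    whs = bbox_preds[..., 2:].exp() * priors[:, 2:]
    # corner format: [tl_x, tl_y, br_x, br_y]
    return torch.cat([xys - whs / 2, xys + whs / 2], dim=-1)

priors = torch.tensor([[16., 16., 8., 8.]])       # one prior at (16, 16), stride 8
bbox_preds = torch.tensor([[0.5, -0.5, 0., 0.]])  # illustrative raw predictions
print(decode_boxes(priors, bbox_preds))           # -> [[16., 8., 24., 16.]]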
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/retina_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from ..builder import HEADS from .anchor_head import AnchorHead @HEADS.register_module() class RetinaHead(AnchorHead): r"""An anchor-based head used in `RetinaNet <https://arxiv.org/pdf/1708.02002.pdf>`_. The head contains two subnetworks. The first classifies anchor boxes and the second regresses deltas for the anchors. Example: >>> import torch >>> self = RetinaHead(11, 7) >>> x = torch.rand(1, 7, 32, 32) >>> cls_score, bbox_pred = self.forward_single(x) >>> # Each anchor predicts a score for each class except background >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors >>> assert cls_per_anchor == (self.num_classes) >>> assert box_per_anchor == 4 """ def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01)), **kwargs): self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(RetinaHead, self).__init__( num_classes, in_channels, anchor_generator=anchor_generator, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.retina_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.retina_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) def forward_single(self, x): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level the channels number is num_anchors * num_classes. bbox_pred (Tensor): Box energies / deltas for a single scale level, the channels number is num_anchors * 4. """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_pred = self.retina_reg(reg_feat) return cls_score, bbox_pred
4,059
34
76
py
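The Example in the RetinaHead docstring above can be run directly; a slightly expanded smoke test, assuming mmdet is importable and the default anchor generator (3 scales x 3 ratios, i.e. 9 base priors per location) is used:

import torch
from mmdet.models.dense_heads import RetinaHead

head = RetinaHead(11, 7)                 # num_classes=11, in_channels=7
x = torch.rand(1, 7, 32, 32)
cls_score, bbox_pred = head.forward_single(x)
# each location predicts num_base_priors * num_classes scores and
# num_base_priors * 4 box deltas
assert cls_score.shape[1] == head.num_base_priors * head.num_classes
assert bbox_pred.shape[1] == head.num_base_priors * 4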
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/ga_rpn_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import warnings import torch import torch.nn as nn import torch.nn.functional as F from mmcv import ConfigDict from mmcv.ops import nms from ..builder import HEADS from .guided_anchor_head import GuidedAnchorHead @HEADS.register_module() class GARPNHead(GuidedAnchorHead): """Guided-Anchor-based RPN head.""" def __init__(self, in_channels, init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_loc', std=0.01, bias_prob=0.01)), **kwargs): super(GARPNHead, self).__init__( 1, in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" self.rpn_conv = nn.Conv2d( self.in_channels, self.feat_channels, 3, padding=1) super(GARPNHead, self)._init_layers() def forward_single(self, x): """Forward feature of a single scale level.""" x = self.rpn_conv(x) x = F.relu(x, inplace=True) (cls_score, bbox_pred, shape_pred, loc_pred) = super(GARPNHead, self).forward_single(x) return cls_score, bbox_pred, shape_pred, loc_pred def loss(self, cls_scores, bbox_preds, shape_preds, loc_preds, gt_bboxes, img_metas, gt_bboxes_ignore=None): losses = super(GARPNHead, self).loss( cls_scores, bbox_preds, shape_preds, loc_preds, gt_bboxes, None, img_metas, gt_bboxes_ignore=gt_bboxes_ignore) return dict( loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'], loss_anchor_shape=losses['loss_shape'], loss_anchor_loc=losses['loss_loc']) def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, mlvl_masks, img_shape, scale_factor, cfg, rescale=False): cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) # deprecate arguments warning if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: warnings.warn( 'In rpn_proposal or test_cfg, ' 'nms_thr has been moved to a dict named nms as ' 'iou_threshold, max_num has been renamed as max_per_img, ' 'name of original arguments and the way to specify ' 'iou_threshold of NMS will be deprecated.') if 'nms' not in cfg: cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) if 'max_num' in cfg: if 'max_per_img' in cfg: assert cfg.max_num == cfg.max_per_img, f'You ' \ f'set max_num and max_per_img at the same time, ' \ f'but get {cfg.max_num} ' \ f'and {cfg.max_per_img} respectively' \ 'Please delete max_num which will be deprecated.' else: cfg.max_per_img = cfg.max_num if 'nms_thr' in cfg: assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ f'iou_threshold in nms and ' \ f'nms_thr at the same time, but get ' \ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ f' respectively. Please delete the ' \ f'nms_thr which will be deprecated.' assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only support ' \ 'naive nms.' mlvl_proposals = [] for idx in range(len(cls_scores)): rpn_cls_score = cls_scores[idx] rpn_bbox_pred = bbox_preds[idx] anchors = mlvl_anchors[idx] mask = mlvl_masks[idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] # if no location is kept, end. if mask.sum() == 0: continue rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class scores = rpn_cls_score.softmax(dim=1)[:, :-1] # filter scores, bbox_pred w.r.t. mask. # anchors are filtered in get_anchors() beforehand. 
scores = scores[mask] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)[mask, :] if scores.dim() == 0: rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0) anchors = anchors.unsqueeze(0) scores = scores.unsqueeze(0) # filter anchors, bbox_pred, scores w.r.t. scores if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: _, topk_inds = scores.topk(cfg.nms_pre) rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] scores = scores[topk_inds] # get proposals w.r.t. anchors and rpn_bbox_pred proposals = self.bbox_coder.decode( anchors, rpn_bbox_pred, max_shape=img_shape) # filter out too small bboxes if cfg.min_bbox_size >= 0: w = proposals[:, 2] - proposals[:, 0] h = proposals[:, 3] - proposals[:, 1] valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) if not valid_mask.all(): proposals = proposals[valid_mask] scores = scores[valid_mask] # NMS in current level proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold) proposals = proposals[:cfg.nms_post, :] mlvl_proposals.append(proposals) proposals = torch.cat(mlvl_proposals, 0) if cfg.get('nms_across_levels', False): # NMS across multi levels proposals, _ = nms(proposals[:, :4], proposals[:, -1], cfg.nms.iou_threshold) proposals = proposals[:cfg.max_per_img, :] else: scores = proposals[:, 4] num = min(cfg.max_per_img, proposals.shape[0]) _, topk_inds = scores.topk(num) proposals = proposals[topk_inds, :] return proposals
7,052
38.623596
79
py
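The deprecation warning in `_get_bboxes_single` above concerns the layout of the RPN test config; a sketch of the old flat keys and the nested form the head converts them into (key names are taken from the code, the values are illustrative):

from mmcv import ConfigDict

# deprecated flat style
old_cfg = ConfigDict(
    dict(nms_pre=2000, nms_post=1000, max_num=1000, nms_thr=0.7, min_bbox_size=0))

# equivalent nested style the head builds internally
new_cfg = ConfigDict(
    dict(nms_pre=2000, nms_post=1000, max_per_img=1000,
         nms=dict(type='nms', iou_threshold=0.7), min_bbox_size=0))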
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/tood_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init from mmcv.ops import deform_conv2d from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, build_assigner, distance2bbox, images_to_levels, multi_apply, reduce_mean, unmap) from mmdet.core.utils import filter_scores_and_topk from ..builder import HEADS, build_loss from .atss_head import ATSSHead class TaskDecomposition(nn.Module): """Task decomposition module in task-aligned predictor of TOOD. Args: feat_channels (int): Number of feature channels in TOOD head. stacked_convs (int): Number of conv layers in TOOD head. la_down_rate (int): Downsample rate of layer attention. conv_cfg (dict): Config dict for convolution layer. norm_cfg (dict): Config dict for normalization layer. """ def __init__(self, feat_channels, stacked_convs, la_down_rate=8, conv_cfg=None, norm_cfg=None): super(TaskDecomposition, self).__init__() self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.in_channels = self.feat_channels * self.stacked_convs self.norm_cfg = norm_cfg self.layer_attention = nn.Sequential( nn.Conv2d(self.in_channels, self.in_channels // la_down_rate, 1), nn.ReLU(inplace=True), nn.Conv2d( self.in_channels // la_down_rate, self.stacked_convs, 1, padding=0), nn.Sigmoid()) self.reduction_conv = ConvModule( self.in_channels, self.feat_channels, 1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, bias=norm_cfg is None) def init_weights(self): for m in self.layer_attention.modules(): if isinstance(m, nn.Conv2d): normal_init(m, std=0.001) normal_init(self.reduction_conv.conv, std=0.01) def forward(self, feat, avg_feat=None): b, c, h, w = feat.shape if avg_feat is None: avg_feat = F.adaptive_avg_pool2d(feat, (1, 1)) weight = self.layer_attention(avg_feat) # here we first compute the product between layer attention weight and # conv weight, and then compute the convolution between new conv weight # and feature map, in order to save memory and FLOPs. conv_weight = weight.reshape( b, 1, self.stacked_convs, 1) * self.reduction_conv.conv.weight.reshape( 1, self.feat_channels, self.stacked_convs, self.feat_channels) conv_weight = conv_weight.reshape(b, self.feat_channels, self.in_channels) feat = feat.reshape(b, self.in_channels, h * w) feat = torch.bmm(conv_weight, feat).reshape(b, self.feat_channels, h, w) if self.norm_cfg is not None: feat = self.reduction_conv.norm(feat) feat = self.reduction_conv.activate(feat) return feat @HEADS.register_module() class TOODHead(ATSSHead): """TOODHead used in `TOOD: Task-aligned One-stage Object Detection. <https://arxiv.org/abs/2108.07755>`_. TOOD uses Task-aligned head (T-head) and is optimized by Task Alignment Learning (TAL). Args: num_dcn (int): Number of deformable convolution in the head. Default: 0. anchor_type (str): If set to `anchor_free`, the head will use centers to regress bboxes. If set to `anchor_based`, the head will regress bboxes based on anchors. Default: `anchor_free`. initial_loss_cls (dict): Config of initial loss. 
Example: >>> self = TOODHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred = self.forward(feats) >>> assert len(cls_score) == len(self.scales) """ def __init__(self, num_classes, in_channels, num_dcn=0, anchor_type='anchor_free', initial_loss_cls=dict( type='FocalLoss', use_sigmoid=True, activated=True, gamma=2.0, alpha=0.25, loss_weight=1.0), **kwargs): assert anchor_type in ['anchor_free', 'anchor_based'] self.num_dcn = num_dcn self.anchor_type = anchor_type self.epoch = 0 # which would be update in SetEpochInfoHook! super(TOODHead, self).__init__(num_classes, in_channels, **kwargs) if self.train_cfg: self.initial_epoch = self.train_cfg.initial_epoch self.initial_assigner = build_assigner( self.train_cfg.initial_assigner) self.initial_loss_cls = build_loss(initial_loss_cls) self.assigner = self.initial_assigner self.alignment_assigner = build_assigner(self.train_cfg.assigner) self.alpha = self.train_cfg.alpha self.beta = self.train_cfg.beta def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.inter_convs = nn.ModuleList() for i in range(self.stacked_convs): if i < self.num_dcn: conv_cfg = dict(type='DCNv2', deform_groups=4) else: conv_cfg = self.conv_cfg chn = self.in_channels if i == 0 else self.feat_channels self.inter_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) self.cls_decomp = TaskDecomposition(self.feat_channels, self.stacked_convs, self.stacked_convs * 8, self.conv_cfg, self.norm_cfg) self.reg_decomp = TaskDecomposition(self.feat_channels, self.stacked_convs, self.stacked_convs * 8, self.conv_cfg, self.norm_cfg) self.tood_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.tood_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.cls_prob_module = nn.Sequential( nn.Conv2d(self.feat_channels * self.stacked_convs, self.feat_channels // 4, 1), nn.ReLU(inplace=True), nn.Conv2d(self.feat_channels // 4, 1, 3, padding=1)) self.reg_offset_module = nn.Sequential( nn.Conv2d(self.feat_channels * self.stacked_convs, self.feat_channels // 4, 1), nn.ReLU(inplace=True), nn.Conv2d(self.feat_channels // 4, 4 * 2, 3, padding=1)) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def init_weights(self): """Initialize weights of the head.""" bias_cls = bias_init_with_prob(0.01) for m in self.inter_convs: normal_init(m.conv, std=0.01) for m in self.cls_prob_module: if isinstance(m, nn.Conv2d): normal_init(m, std=0.01) for m in self.reg_offset_module: if isinstance(m, nn.Conv2d): normal_init(m, std=0.001) normal_init(self.cls_prob_module[-1], std=0.01, bias=bias_cls) self.cls_decomp.init_weights() self.reg_decomp.init_weights() normal_init(self.tood_cls, std=0.01, bias=bias_cls) normal_init(self.tood_reg, std=0.01) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Decoded box for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. In [tl_x, tl_y, br_x, br_y] format. 
""" cls_scores = [] bbox_preds = [] for idx, (x, scale, stride) in enumerate( zip(feats, self.scales, self.prior_generator.strides)): b, c, h, w = x.shape anchor = self.prior_generator.single_level_grid_priors( (h, w), idx, device=x.device) anchor = torch.cat([anchor for _ in range(b)]) # extract task interactive features inter_feats = [] for inter_conv in self.inter_convs: x = inter_conv(x) inter_feats.append(x) feat = torch.cat(inter_feats, 1) # task decomposition avg_feat = F.adaptive_avg_pool2d(feat, (1, 1)) cls_feat = self.cls_decomp(feat, avg_feat) reg_feat = self.reg_decomp(feat, avg_feat) # cls prediction and alignment cls_logits = self.tood_cls(cls_feat) cls_prob = self.cls_prob_module(feat) cls_score = (cls_logits.sigmoid() * cls_prob.sigmoid()).sqrt() # reg prediction and alignment if self.anchor_type == 'anchor_free': reg_dist = scale(self.tood_reg(reg_feat).exp()).float() reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4) reg_bbox = distance2bbox( self.anchor_center(anchor) / stride[0], reg_dist).reshape(b, h, w, 4).permute(0, 3, 1, 2) # (b, c, h, w) elif self.anchor_type == 'anchor_based': reg_dist = scale(self.tood_reg(reg_feat)).float() reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4) reg_bbox = self.bbox_coder.decode(anchor, reg_dist).reshape( b, h, w, 4).permute(0, 3, 1, 2) / stride[0] else: raise NotImplementedError( f'Unknown anchor type: {self.anchor_type}.' f'Please use `anchor_free` or `anchor_based`.') reg_offset = self.reg_offset_module(feat) bbox_pred = self.deform_sampling(reg_bbox.contiguous(), reg_offset.contiguous()) cls_scores.append(cls_score) bbox_preds.append(bbox_pred) return tuple(cls_scores), tuple(bbox_preds) def deform_sampling(self, feat, offset): """Sampling the feature x according to offset. Args: feat (Tensor): Feature offset (Tensor): Spatial offset for for feature sampliing """ # it is an equivalent implementation of bilinear interpolation b, c, h, w = feat.shape weight = feat.new_ones(c, 1, 1, 1) y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) return y def anchor_center(self, anchors): """Get anchor centers from anchors. Args: anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. Returns: Tensor: Anchor centers with shape (N, 2), "xy" format. """ anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 return torch.stack([anchors_cx, anchors_cy], dim=-1) def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, bbox_targets, alignment_metrics, stride): """Compute loss of a single scale level. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Decoded bboxes for each scale level with shape (N, num_anchors * 4, H, W). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors). bbox_targets (Tensor): BBox regression targets of each anchor with shape (N, num_total_anchors, 4). alignment_metrics (Tensor): Alignment metrics with shape (N, num_total_anchors). stride (tuple[int]): Downsample stride of the feature map. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape( -1, self.cls_out_channels).contiguous() bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) alignment_metrics = alignment_metrics.reshape(-1) label_weights = label_weights.reshape(-1) targets = labels if self.epoch < self.initial_epoch else ( labels, alignment_metrics) cls_loss_func = self.initial_loss_cls \ if self.epoch < self.initial_epoch else self.loss_cls loss_cls = cls_loss_func( cls_score, targets, label_weights, avg_factor=1.0) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_decode_bbox_pred = pos_bbox_pred pos_decode_bbox_targets = pos_bbox_targets / stride[0] # regression loss pos_bbox_weight = self.centerness_target( pos_anchors, pos_bbox_targets ) if self.epoch < self.initial_epoch else alignment_metrics[ pos_inds] loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=pos_bbox_weight, avg_factor=1.0) else: loss_bbox = bbox_pred.sum() * 0 pos_bbox_weight = bbox_targets.new_tensor(0.) return loss_cls, loss_bbox, alignment_metrics.sum( ), pos_bbox_weight.sum() @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Decoded box for each scale level with shape (N, num_anchors * 4, H, W) in [tl_x, tl_y, br_x, br_y] format. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" num_imgs = len(img_metas) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 flatten_cls_scores = torch.cat([ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_score in cls_scores ], 1) flatten_bbox_preds = torch.cat([ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) * stride[0] for bbox_pred, stride in zip(bbox_preds, self.prior_generator.strides) ], 1) cls_reg_targets = self.get_targets( flatten_cls_scores, flatten_bbox_preds, anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) (anchor_list, labels_list, label_weights_list, bbox_targets_list, alignment_metrics_list) = cls_reg_targets losses_cls, losses_bbox,\ cls_avg_factors, bbox_avg_factors = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, alignment_metrics_list, self.prior_generator.strides) cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item() losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls)) bbox_avg_factor = reduce_mean( sum(bbox_avg_factors)).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. 
""" cfg = self.test_cfg if cfg is None else cfg nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for cls_score, bbox_pred, priors, stride in zip( cls_score_list, bbox_pred_list, mlvl_priors, self.prior_generator.strides): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) * stride[0] scores = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, keep_idxs, filtered_results = results bboxes = filtered_results['bbox_pred'] mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale, with_nms, None, **kwargs) def get_targets(self, cls_scores, bbox_preds, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in multiple images. Args: cls_scores (Tensor): Classification predictions of images, a 3D-Tensor with shape [num_imgs, num_priors, num_classes]. bbox_preds (Tensor): Decoded bboxes predictions of one image, a 3D-Tensor with shape [num_imgs, num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_labels_list (list[Tensor]): Ground truth labels of each box. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: a tuple containing learning targets. - anchors_list (list[list[Tensor]]): Anchors of each level. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. - bbox_targets_list (list[Tensor]): BBox targets of each level. - norm_alignment_metrics_list (list[Tensor]): Normalized alignment metrics of each level. 
""" num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] # anchor_list: list(b * [-1, 4]) if self.epoch < self.initial_epoch: (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( super()._get_target_single, anchor_list, valid_flag_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) all_assign_metrics = [ weight[..., 0] for weight in all_bbox_weights ] else: (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_assign_metrics) = multi_apply( self._get_target_single, cls_scores, bbox_preds, anchor_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) norm_alignment_metrics_list = images_to_levels(all_assign_metrics, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, norm_alignment_metrics_list) def _get_target_single(self, cls_scores, bbox_preds, flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression, classification targets for anchors in a single image. Args: cls_scores (list(Tensor)): Box scores for each image. bbox_preds (list(Tensor)): Box energies / deltas for each image. flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. anchors (Tensor): All anchors in the image with shape (N, 4). labels (Tensor): Labels of all anchors in the image with shape (N,). label_weights (Tensor): Label weights of all anchor in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). norm_alignment_metrics (Tensor): Normalized alignment metrics of all priors in the image with shape (N,). 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] assign_result = self.alignment_assigner.assign( cls_scores[inside_flags, :], bbox_preds[inside_flags, :], anchors, gt_bboxes, gt_bboxes_ignore, gt_labels, self.alpha, self.beta) assign_ious = assign_result.max_overlaps assign_metrics = assign_result.assign_metrics sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) norm_alignment_metrics = anchors.new_zeros( num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: # point-based pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class since v2.5.0 labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 class_assigned_gt_inds = torch.unique( sampling_result.pos_assigned_gt_inds) for gt_inds in class_assigned_gt_inds: gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds == gt_inds] pos_alignment_metrics = assign_metrics[gt_class_inds] pos_ious = assign_ious[gt_class_inds] pos_norm_alignment_metrics = pos_alignment_metrics / ( pos_alignment_metrics.max() + 10e-8) * pos_ious.max() norm_alignment_metrics[gt_class_inds] = pos_norm_alignment_metrics # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) norm_alignment_metrics = unmap(norm_alignment_metrics, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, norm_alignment_metrics)
33,854
43.024707
79
py
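TaskDecomposition above folds the per-layer attention into the 1x1 reduction conv weight and applies it as a batched matmul to save memory and FLOPs; a self-contained check of that equivalence (the shapes are illustrative, not the head's defaults):

import torch
import torch.nn.functional as F

b, stacked, c, h, w = 2, 6, 8, 4, 4
feat = torch.rand(b, stacked * c, h, w)          # concatenated inter-conv features
layer_attn = torch.rand(b, stacked, 1, 1)        # layer attention weights
reduction_w = torch.rand(c, stacked * c, 1, 1)   # 1x1 reduction conv weight

# fold the attention into the conv weight, then apply it as a batched matmul
conv_w = layer_attn.reshape(b, 1, stacked, 1) * reduction_w.reshape(1, c, stacked, c)
out = torch.bmm(conv_w.reshape(b, c, stacked * c),
                feat.reshape(b, stacked * c, h * w)).reshape(b, c, h, w)

# reference: scale each layer's features first, then run the 1x1 conv
ref = F.conv2d(feat * layer_attn.repeat_interleave(c, dim=1), reduction_w)
assert torch.allclose(out, ref, atol=1e-5)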
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/deformable_detr_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Linear, bias_init_with_prob, constant_init from mmcv.runner import force_fp32 from mmdet.core import multi_apply from mmdet.models.utils.transformer import inverse_sigmoid from ..builder import HEADS from .detr_head import DETRHead @HEADS.register_module() class DeformableDETRHead(DETRHead): """Head of DeformDETR: Deformable DETR: Deformable Transformers for End-to- End Object Detection. Code is modified from the `official github repo <https://github.com/fundamentalvision/Deformable-DETR>`_. More details can be found in the `paper <https://arxiv.org/abs/2010.04159>`_ . Args: with_box_refine (bool): Whether to refine the reference points in the decoder. Defaults to False. as_two_stage (bool) : Whether to generate the proposal from the outputs of encoder. transformer (obj:`ConfigDict`): ConfigDict is used for building the Encoder and Decoder. """ def __init__(self, *args, with_box_refine=False, as_two_stage=False, transformer=None, **kwargs): self.with_box_refine = with_box_refine self.as_two_stage = as_two_stage if self.as_two_stage: transformer['as_two_stage'] = self.as_two_stage super(DeformableDETRHead, self).__init__( *args, transformer=transformer, **kwargs) def _init_layers(self): """Initialize classification branch and regression branch of head.""" fc_cls = Linear(self.embed_dims, self.cls_out_channels) reg_branch = [] for _ in range(self.num_reg_fcs): reg_branch.append(Linear(self.embed_dims, self.embed_dims)) reg_branch.append(nn.ReLU()) reg_branch.append(Linear(self.embed_dims, 4)) reg_branch = nn.Sequential(*reg_branch) def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) # last reg_branch is used to generate proposal from # encode feature map when as_two_stage is True. num_pred = (self.transformer.decoder.num_layers + 1) if \ self.as_two_stage else self.transformer.decoder.num_layers if self.with_box_refine: self.cls_branches = _get_clones(fc_cls, num_pred) self.reg_branches = _get_clones(reg_branch, num_pred) else: self.cls_branches = nn.ModuleList( [fc_cls for _ in range(num_pred)]) self.reg_branches = nn.ModuleList( [reg_branch for _ in range(num_pred)]) if not self.as_two_stage: self.query_embedding = nn.Embedding(self.num_query, self.embed_dims * 2) def init_weights(self): """Initialize weights of the DeformDETR head.""" self.transformer.init_weights() if self.loss_cls.use_sigmoid: bias_init = bias_init_with_prob(0.01) for m in self.cls_branches: nn.init.constant_(m.bias, bias_init) for m in self.reg_branches: constant_init(m[-1], 0, bias=0) nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0) if self.as_two_stage: for m in self.reg_branches: nn.init.constant_(m[-1].bias.data[2:], 0.0) def forward(self, mlvl_feats, img_metas): """Forward function. Args: mlvl_feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor with shape (N, C, H, W). img_metas (list[dict]): List of image information. Returns: all_cls_scores (Tensor): Outputs from the classification head, \ shape [nb_dec, bs, num_query, cls_out_channels]. Note \ cls_out_channels should includes background. all_bbox_preds (Tensor): Sigmoid outputs from the regression \ head with normalized coordinate format (cx, cy, w, h). \ Shape [nb_dec, bs, num_query, 4]. enc_outputs_class (Tensor): The score of each point on encode \ feature map, has shape (N, h*w, num_class). 
Only when \ as_two_stage is True it would be returned, otherwise \ `None` would be returned. enc_outputs_coord (Tensor): The proposal generate from the \ encode feature map, has shape (N, h*w, 4). Only when \ as_two_stage is True it would be returned, otherwise \ `None` would be returned. """ batch_size = mlvl_feats[0].size(0) input_img_h, input_img_w = img_metas[0]['batch_input_shape'] img_masks = mlvl_feats[0].new_ones( (batch_size, input_img_h, input_img_w)) for img_id in range(batch_size): img_h, img_w, _ = img_metas[img_id]['img_shape'] img_masks[img_id, :img_h, :img_w] = 0 mlvl_masks = [] mlvl_positional_encodings = [] for feat in mlvl_feats: mlvl_masks.append( F.interpolate(img_masks[None], size=feat.shape[-2:]).to(torch.bool).squeeze(0)) mlvl_positional_encodings.append( self.positional_encoding(mlvl_masks[-1])) query_embeds = None if not self.as_two_stage: query_embeds = self.query_embedding.weight hs, init_reference, inter_references, \ enc_outputs_class, enc_outputs_coord = self.transformer( mlvl_feats, mlvl_masks, query_embeds, mlvl_positional_encodings, reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 cls_branches=self.cls_branches if self.as_two_stage else None # noqa:E501 ) hs = hs.permute(0, 2, 1, 3) outputs_classes = [] outputs_coords = [] for lvl in range(hs.shape[0]): if lvl == 0: reference = init_reference else: reference = inter_references[lvl - 1] reference = inverse_sigmoid(reference) outputs_class = self.cls_branches[lvl](hs[lvl]) tmp = self.reg_branches[lvl](hs[lvl]) if reference.shape[-1] == 4: tmp += reference else: assert reference.shape[-1] == 2 tmp[..., :2] += reference outputs_coord = tmp.sigmoid() outputs_classes.append(outputs_class) outputs_coords.append(outputs_coord) outputs_classes = torch.stack(outputs_classes) outputs_coords = torch.stack(outputs_coords) if self.as_two_stage: return outputs_classes, outputs_coords, \ enc_outputs_class, \ enc_outputs_coord.sigmoid() else: return outputs_classes, outputs_coords, \ None, None @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) def loss(self, all_cls_scores, all_bbox_preds, enc_cls_scores, enc_bbox_preds, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore=None): """"Loss function. Args: all_cls_scores (Tensor): Classification score of all decoder layers, has shape [nb_dec, bs, num_query, cls_out_channels]. all_bbox_preds (Tensor): Sigmoid regression outputs of all decode layers. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. enc_cls_scores (Tensor): Classification scores of points on encode feature map , has shape (N, h*w, num_classes). Only be passed when as_two_stage is True, otherwise is None. enc_bbox_preds (Tensor): Regression results of each points on the encode feature map, has shape (N, h*w, 4). Only be passed when as_two_stage is True, otherwise is None. gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels_list (list[Tensor]): Ground truth class indices for each image with shape (num_gts, ). img_metas (list[dict]): List of image meta information. gt_bboxes_ignore (list[Tensor], optional): Bounding boxes which can be ignored for each image. Default None. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert gt_bboxes_ignore is None, \ f'{self.__class__.__name__} only supports ' \ f'for gt_bboxes_ignore setting to None.' 
num_dec_layers = len(all_cls_scores) all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] all_gt_bboxes_ignore_list = [ gt_bboxes_ignore for _ in range(num_dec_layers) ] img_metas_list = [img_metas for _ in range(num_dec_layers)] losses_cls, losses_bbox, losses_iou = multi_apply( self.loss_single, all_cls_scores, all_bbox_preds, all_gt_bboxes_list, all_gt_labels_list, img_metas_list, all_gt_bboxes_ignore_list) loss_dict = dict() # loss of proposal generated from encode feature map. if enc_cls_scores is not None: binary_labels_list = [ torch.zeros_like(gt_labels_list[i]) for i in range(len(img_metas)) ] enc_loss_cls, enc_losses_bbox, enc_losses_iou = \ self.loss_single(enc_cls_scores, enc_bbox_preds, gt_bboxes_list, binary_labels_list, img_metas, gt_bboxes_ignore) loss_dict['enc_loss_cls'] = enc_loss_cls loss_dict['enc_loss_bbox'] = enc_losses_bbox loss_dict['enc_loss_iou'] = enc_losses_iou # loss from the last decoder layer loss_dict['loss_cls'] = losses_cls[-1] loss_dict['loss_bbox'] = losses_bbox[-1] loss_dict['loss_iou'] = losses_iou[-1] # loss from other decoder layers num_dec_layer = 0 for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], losses_bbox[:-1], losses_iou[:-1]): loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i num_dec_layer += 1 return loss_dict @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) def get_bboxes(self, all_cls_scores, all_bbox_preds, enc_cls_scores, enc_bbox_preds, img_metas, rescale=False): """Transform network outputs for a batch into bbox predictions. Args: all_cls_scores (Tensor): Classification score of all decoder layers, has shape [nb_dec, bs, num_query, cls_out_channels]. all_bbox_preds (Tensor): Sigmoid regression outputs of all decode layers. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape [nb_dec, bs, num_query, 4]. enc_cls_scores (Tensor): Classification scores of points on encode feature map , has shape (N, h*w, num_classes). Only be passed when as_two_stage is True, otherwise is None. enc_bbox_preds (Tensor): Regression results of each points on the encode feature map, has shape (N, h*w, 4). Only be passed when as_two_stage is True, otherwise is None. img_metas (list[dict]): Meta information of each image. rescale (bool, optional): If True, return boxes in original image space. Default False. Returns: list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ The first item is an (n, 5) tensor, where the first 4 columns \ are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ 5-th column is a score between 0 and 1. The second item is a \ (n,) tensor where each item is the predicted class label of \ the corresponding box. """ cls_scores = all_cls_scores[-1] bbox_preds = all_bbox_preds[-1] result_list = [] for img_id in range(len(img_metas)): cls_score = cls_scores[img_id] bbox_pred = bbox_preds[img_id] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single(cls_score, bbox_pred, img_shape, scale_factor, rescale) result_list.append(proposals) return result_list
13,728
42.037618
98
py
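The decoder loop above refines boxes in logit space: the reference point is mapped through `inverse_sigmoid`, the regression delta is added, and the result is squashed back with a sigmoid. A sketch of that single step; the helper below is a simplified stand-in for `mmdet.models.utils.transformer.inverse_sigmoid`, and the delta values are hypothetical:

import torch

def inverse_sigmoid(x, eps=1e-5):
    # simplified version of the mmdet helper
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))

ref = torch.tensor([[0.25, 0.40, 0.10, 0.20]])       # normalized (cx, cy, w, h)
delta = torch.tensor([[0.30, -0.10, 0.05, 0.00]])    # hypothetical reg branch output
refined = (inverse_sigmoid(ref) + delta).sigmoid()   # refined box, still in (0, 1)
print(refined)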
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/ga_retina_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import MaskedConv2d from ..builder import HEADS from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead @HEADS.register_module() class GARetinaHead(GuidedAnchorHead): """Guided-Anchor-based RetinaNet head.""" def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, init_cfg=None, **kwargs): if init_cfg is None: init_cfg = dict( type='Normal', layer='Conv2d', std=0.01, override=[ dict( type='Normal', name='conv_loc', std=0.01, bias_prob=0.01), dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01) ]) self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(GARetinaHead, self).__init__( num_classes, in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1) self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2, 1) self.feature_adaption_cls = FeatureAdaption( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.feature_adaption_reg = FeatureAdaption( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.retina_cls = MaskedConv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.retina_reg = MaskedConv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) def forward_single(self, x): """Forward feature map of a single scale level.""" cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) loc_pred = self.conv_loc(cls_feat) shape_pred = self.conv_shape(reg_feat) cls_feat = self.feature_adaption_cls(cls_feat, shape_pred) reg_feat = self.feature_adaption_reg(reg_feat, shape_pred) if not self.training: mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr else: mask = None cls_score = self.retina_cls(cls_feat, mask) bbox_pred = self.retina_reg(reg_feat, mask) return cls_score, bbox_pred, shape_pred, loc_pred
3,931
33.491228
77
py
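At test time the head above keeps only locations whose predicted objectness clears `loc_filter_thr` and passes that mask to MaskedConv2d; the thresholding itself is simply the following (shapes and threshold are illustrative):

import torch

loc_pred = torch.randn(1, 1, 8, 8)               # location branch logits
loc_filter_thr = 0.01
mask = loc_pred.sigmoid()[0] >= loc_filter_thr   # (1, 8, 8) boolean keep-mask
print(mask.float().mean())                       # fraction of locations kept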
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/ld_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.runner import force_fp32 from mmdet.core import bbox_overlaps, multi_apply, reduce_mean from ..builder import HEADS, build_loss from .gfl_head import GFLHead @HEADS.register_module() class LDHead(GFLHead): """Localization distillation Head. (Short description) It utilizes the learned bbox distributions to transfer the localization dark knowledge from teacher to student. Original paper: `Localization Distillation for Object Detection. <https://arxiv.org/abs/2102.12252>`_ Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. loss_ld (dict): Config of Localization Distillation Loss (LD), T is the temperature for distillation. """ def __init__(self, num_classes, in_channels, loss_ld=dict( type='LocalizationDistillationLoss', loss_weight=0.25, T=10), **kwargs): super(LDHead, self).__init__(num_classes, in_channels, **kwargs) self.loss_ld = build_loss(loss_ld) def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, bbox_targets, stride, soft_targets, num_total_samples): """Compute loss of a single scale level. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). stride (tuple): Stride in this scale level. num_total_samples (int): Number of positive samples that is reduced over all GPUs. Returns: dict[tuple, Tensor]: Loss components and weight targets. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) soft_targets = soft_targets.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) score = label_weights.new_zeros(labels.shape) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] weight_targets = cls_score.detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][pos_inds] pos_bbox_pred_corners = self.integral(pos_bbox_pred) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchor_centers, pos_bbox_pred_corners) pos_decode_bbox_targets = pos_bbox_targets / stride[0] score[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) pos_soft_targets = soft_targets[pos_inds] soft_corners = pos_soft_targets.reshape(-1, self.reg_max + 1) target_corners = self.bbox_coder.encode(pos_anchor_centers, pos_decode_bbox_targets, self.reg_max).reshape(-1) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=weight_targets, avg_factor=1.0) # dfl loss loss_dfl = self.loss_dfl( pred_corners, target_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) # ld loss loss_ld = self.loss_ld( pred_corners, soft_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) else: loss_ld = bbox_pred.sum() * 0 loss_bbox = bbox_pred.sum() * 0 loss_dfl = bbox_pred.sum() * 0 weight_targets = bbox_pred.new_tensor(0) # cls (qfl) loss loss_cls = self.loss_cls( cls_score, (labels, score), weight=label_weights, avg_factor=num_total_samples) return loss_cls, loss_bbox, loss_dfl, loss_ld, weight_targets.sum() def forward_train(self, x, out_teacher, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=None, proposal_cfg=None, **kwargs): """ Args: x (list[Tensor]): Features from FPN. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). proposal_cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used Returns: tuple[dict, list]: The loss components and proposals of each image. - losses (dict[str, Tensor]): A dictionary of loss components. - proposal_list (list[Tensor]): Proposals of each image. 
""" outs = self(x) soft_target = out_teacher[1] if gt_labels is None: loss_inputs = outs + (gt_bboxes, soft_target, img_metas) else: loss_inputs = outs + (gt_bboxes, gt_labels, soft_target, img_metas) losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) if proposal_cfg is None: return losses else: proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg) return losses, proposal_list @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, soft_target, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Cls and quality scores for each scale level has shape (N, num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos, dtype=torch.float, device=device)).item() num_total_samples = max(num_total_samples, 1.0) losses_cls, losses_bbox, losses_dfl, losses_ld, \ avg_factor = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, self.prior_generator.strides, soft_target, num_total_samples=num_total_samples) avg_factor = sum(avg_factor) + 1e-6 avg_factor = reduce_mean(avg_factor).item() losses_bbox = [x / avg_factor for x in losses_bbox] losses_dfl = [x / avg_factor for x in losses_dfl] return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl, loss_ld=losses_ld)
10,636
39.599237
79
py
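`loss_ld` above distills the teacher's per-edge distribution bins (`reg_max + 1` logits per box edge) into the student with temperature T. The actual mmdet loss class is `LocalizationDistillationLoss`; the sketch below only shows the generic temperature-scaled KL idea behind it, not that implementation, and all tensors are hypothetical:

import torch
import torch.nn.functional as F

def distill_kl(student_logits, teacher_logits, T=10.0):
    # temperature-scaled KL divergence over the distribution bins of each edge
    log_p_s = F.log_softmax(student_logits / T, dim=-1)
    p_t = F.softmax(teacher_logits / T, dim=-1)
    return F.kl_div(log_p_s, p_t, reduction='batchmean') * (T * T)

reg_max = 16
student = torch.randn(4, reg_max + 1)   # 4 edges of one positive box
teacher = torch.randn(4, reg_max + 1)
print(distill_kl(student, teacher))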
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/ssd_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import force_fp32 from mmdet.core import (build_assigner, build_bbox_coder, build_prior_generator, build_sampler, multi_apply) from ..builder import HEADS from ..losses import smooth_l1_loss from .anchor_head import AnchorHead # TODO: add loss evaluator for SSD @HEADS.register_module() class SSDHead(AnchorHead): """SSD head used in https://arxiv.org/abs/1512.02325. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Default: 0. feat_channels (int): Number of hidden channels when stacked_convs > 0. Default: 256. use_depthwise (bool): Whether to use DepthwiseSeparableConv. Default: False. conv_cfg (dict): Dictionary to construct and config conv layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: None. act_cfg (dict): Dictionary to construct and config activation layer. Default: None. anchor_generator (dict): Config dict for anchor generator bbox_coder (dict): Config of bounding box coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. init_cfg (dict or list[dict], optional): Initialization config dict. """ # noqa: W605 def __init__(self, num_classes=80, in_channels=(512, 1024, 512, 256, 256, 256), stacked_convs=0, feat_channels=256, use_depthwise=False, conv_cfg=None, norm_cfg=None, act_cfg=None, anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=300, strides=[8, 16, 32, 64, 100, 300], ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), basesize_ratio_range=(0.1, 0.9)), bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=True, target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0], ), reg_decoded_bbox=False, train_cfg=None, test_cfg=None, init_cfg=dict( type='Xavier', layer='Conv2d', distribution='uniform', bias=0)): super(AnchorHead, self).__init__(init_cfg) self.num_classes = num_classes self.in_channels = in_channels self.stacked_convs = stacked_convs self.feat_channels = feat_channels self.use_depthwise = use_depthwise self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.cls_out_channels = num_classes + 1 # add background class self.prior_generator = build_prior_generator(anchor_generator) # Usually the numbers of anchors for each level are the same # except SSD detectors. 
So it is an int in the most dense # heads but a list of int in SSDHead self.num_base_priors = self.prior_generator.num_base_priors self._init_layers() self.bbox_coder = build_bbox_coder(bbox_coder) self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = False self.cls_focal_loss = False self.train_cfg = train_cfg self.test_cfg = test_cfg # set sampling=False for archor_target self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # SSD sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False @property def num_anchors(self): """ Returns: list[int]: Number of base_anchors on each point of each level. """ warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.num_base_priors def _init_layers(self): """Initialize layers of the head.""" self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() # TODO: Use registry to choose ConvModule type conv = DepthwiseSeparableConvModule \ if self.use_depthwise else ConvModule for channel, num_base_priors in zip(self.in_channels, self.num_base_priors): cls_layers = [] reg_layers = [] in_channel = channel # build stacked conv tower, not used in default ssd for i in range(self.stacked_convs): cls_layers.append( conv( in_channel, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) reg_layers.append( conv( in_channel, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) in_channel = self.feat_channels # SSD-Lite head if self.use_depthwise: cls_layers.append( ConvModule( in_channel, in_channel, 3, padding=1, groups=in_channel, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) reg_layers.append( ConvModule( in_channel, in_channel, 3, padding=1, groups=in_channel, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) cls_layers.append( nn.Conv2d( in_channel, num_base_priors * self.cls_out_channels, kernel_size=1 if self.use_depthwise else 3, padding=0 if self.use_depthwise else 1)) reg_layers.append( nn.Conv2d( in_channel, num_base_priors * 4, kernel_size=1 if self.use_depthwise else 3, padding=0 if self.use_depthwise else 1)) self.cls_convs.append(nn.Sequential(*cls_layers)) self.reg_convs.append(nn.Sequential(*reg_layers)) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. """ cls_scores = [] bbox_preds = [] for feat, reg_conv, cls_conv in zip(feats, self.reg_convs, self.cls_convs): cls_scores.append(cls_conv(feat)) bbox_preds.append(reg_conv(feat)) return cls_scores, bbox_preds def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights, bbox_targets, bbox_weights, num_total_samples): """Compute loss of a single image. Args: cls_score (Tensor): Box scores for eachimage Has shape (num_total_anchors, num_classes). bbox_pred (Tensor): Box energies / deltas for each image level with shape (num_total_anchors, 4). anchors (Tensor): Box reference for each scale level with shape (num_total_anchors, 4). 
labels (Tensor): Labels of each anchors with shape (num_total_anchors,). label_weights (Tensor): Label weights of each anchor with shape (num_total_anchors,) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (num_total_anchors, 4). bbox_weights (Tensor): BBox regression loss weights of each anchor with shape (num_total_anchors, 4). num_total_samples (int): If sampling, num total samples equal to the number of total anchors; Otherwise, it is the number of positive anchors. Returns: dict[str, Tensor]: A dictionary of loss components. """ loss_cls_all = F.cross_entropy( cls_score, labels, reduction='none') * label_weights # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( as_tuple=False).reshape(-1) neg_inds = (labels == self.num_classes).nonzero( as_tuple=False).view(-1) num_pos_samples = pos_inds.size(0) num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples if num_neg_samples > neg_inds.size(0): num_neg_samples = neg_inds.size(0) topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) loss_cls_pos = loss_cls_all[pos_inds].sum() loss_cls_neg = topk_loss_cls_neg.sum() loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. bbox_pred = self.bbox_coder.decode(anchor, bbox_pred) loss_bbox = smooth_l1_loss( bbox_pred, bbox_targets, bbox_weights, beta=self.train_cfg.smoothl1_beta, avg_factor=num_total_samples) return loss_cls[None], loss_bbox @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=1, unmap_outputs=False) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_images = len(img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) losses_cls, losses_bbox = multi_apply( self.loss_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
14,791
40.318436
79
py
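A minimal forward-pass sketch for the SSD head above, assuming this repo installs with the standard mmdet package layout and torch is available; the feature channels follow the head's default in_channels, while the spatial sizes (38, 19, 10, 5, 3, 1) are only illustrative values matching a 300x300 input:

import torch
from mmdet.models.dense_heads import SSDHead

# Default config: 6 levels with in_channels (512, 1024, 512, 256, 256, 256).
head = SSDHead(num_classes=80)
feats = [
    torch.rand(1, c, s, s)
    for c, s in zip((512, 1024, 512, 256, 256, 256), (38, 19, 10, 5, 3, 1))
]
cls_scores, bbox_preds = head(feats)
# Per level: num_base_priors * (num_classes + 1) cls channels and
# num_base_priors * 4 box channels.
print(cls_scores[0].shape, bbox_preds[0].shape)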
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/fcos_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Scale from mmcv.runner import force_fp32 from mmdet.core import multi_apply, reduce_mean from ..builder import HEADS, build_loss from .anchor_free_head import AnchorFreeHead INF = 1e8 @HEADS.register_module() class FCOSHead(AnchorFreeHead): """Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_. The FCOS head does not use anchor boxes. Instead bounding boxes are predicted at each pixel and a centerness measure is used to suppress low-quality predictions. Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training tricks used in official repo, which will bring remarkable mAP gains of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for more detail. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. strides (list[int] | list[tuple[int, int]]): Strides of points in multiple feature levels. Default: (4, 8, 16, 32, 64). regress_ranges (tuple[tuple[int, int]]): Regress range of multiple level points. center_sampling (bool): If true, use center sampling. Default: False. center_sample_radius (float): Radius of center sampling. Default: 1.5. norm_on_bbox (bool): If true, normalize the regression targets with FPN strides. Default: False. centerness_on_reg (bool): If true, position centerness on the regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042. Default: False. conv_bias (bool | str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Default: "auto". loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. loss_centerness (dict): Config of centerness loss. norm_cfg (dict): dictionary to construct and config norm layer. Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). init_cfg (dict or list[dict], optional): Initialization config dict. 
Example: >>> self = FCOSHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred, centerness = self.forward(feats) >>> assert len(cls_score) == len(self.scales) """ # noqa: E501 def __init__(self, num_classes, in_channels, regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), (512, INF)), center_sampling=False, center_sample_radius=1.5, norm_on_bbox=False, centerness_on_reg=False, loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='IoULoss', loss_weight=1.0), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)), **kwargs): self.regress_ranges = regress_ranges self.center_sampling = center_sampling self.center_sample_radius = center_sample_radius self.norm_on_bbox = norm_on_bbox self.centerness_on_reg = centerness_on_reg super().__init__( num_classes, in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, norm_cfg=norm_cfg, init_cfg=init_cfg, **kwargs) self.loss_centerness = build_loss(loss_centerness) def _init_layers(self): """Initialize layers of the head.""" super()._init_layers() self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: cls_scores (list[Tensor]): Box scores for each scale level, \ each is a 4D-tensor, the channel number is \ num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each \ scale level, each is a 4D-tensor, the channel number is \ num_points * 4. centernesses (list[Tensor]): centerness for each scale level, \ each is a 4D-tensor, the channel number is num_points * 1. """ return multi_apply(self.forward_single, feats, self.scales, self.strides) def forward_single(self, x, scale, stride): """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps, only used to normalize the bbox prediction when self.norm_on_bbox is True. Returns: tuple: scores for each class, bbox predictions and centerness \ predictions of input feature maps. """ cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x) if self.centerness_on_reg: centerness = self.conv_centerness(reg_feat) else: centerness = self.conv_centerness(cls_feat) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() if self.norm_on_bbox: bbox_pred = F.relu(bbox_pred) if not self.training: bbox_pred *= stride else: bbox_pred = bbox_pred.exp() return cls_score, bbox_pred, centerness @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) def loss(self, cls_scores, bbox_preds, centernesses, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. 
centernesses (list[Tensor]): centerness for each scale level, each is a 4D-tensor, the channel number is num_points * 1. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_scores) == len(bbox_preds) == len(centernesses) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes, gt_labels) num_imgs = cls_scores[0].size(0) # flatten cls_scores, bbox_preds and centerness flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_centerness = [ centerness.permute(0, 2, 3, 1).reshape(-1) for centerness in centernesses ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_centerness = torch.cat(flatten_centerness) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)).nonzero().reshape(-1) num_pos = torch.tensor( len(pos_inds), dtype=torch.float, device=bbox_preds[0].device) num_pos = max(reduce_mean(num_pos), 1.0) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos) pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_centerness = flatten_centerness[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_centerness_targets = self.centerness_target(pos_bbox_targets) # centerness weighted iou loss centerness_denorm = max( reduce_mean(pos_centerness_targets.sum().detach()), 1e-6) if len(pos_inds) > 0: pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = self.bbox_coder.decode( pos_points, pos_bbox_preds) pos_decoded_target_preds = self.bbox_coder.decode( pos_points, pos_bbox_targets) loss_bbox = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds, weight=pos_centerness_targets, avg_factor=centerness_denorm) loss_centerness = self.loss_centerness( pos_centerness, pos_centerness_targets, avg_factor=num_pos) else: loss_bbox = pos_bbox_preds.sum() loss_centerness = pos_centerness.sum() return dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_centerness=loss_centerness) def get_targets(self, points, gt_bboxes_list, gt_labels_list): """Compute regression, classification and centerness targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels_list (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). Returns: tuple: concat_lvl_labels (list[Tensor]): Labels of each level. 
\ concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ level. """ assert len(points) == len(self.regress_ranges) num_levels = len(points) # expand regress ranges to align with points expanded_regress_ranges = [ points[i].new_tensor(self.regress_ranges[i])[None].expand_as( points[i]) for i in range(num_levels) ] # concat all levels points and regress ranges concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl num_points = [center.size(0) for center in points] # get labels and bbox_targets of each image labels_list, bbox_targets_list = multi_apply( self._get_target_single, gt_bboxes_list, gt_labels_list, points=concat_points, regress_ranges=concat_regress_ranges, num_points_per_lvl=num_points) # split to per img, per level labels_list = [labels.split(num_points, 0) for labels in labels_list] bbox_targets_list = [ bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list ] # concat per level image concat_lvl_labels = [] concat_lvl_bbox_targets = [] for i in range(num_levels): concat_lvl_labels.append( torch.cat([labels[i] for labels in labels_list])) bbox_targets = torch.cat( [bbox_targets[i] for bbox_targets in bbox_targets_list]) if self.norm_on_bbox: bbox_targets = bbox_targets / self.strides[i] concat_lvl_bbox_targets.append(bbox_targets) return concat_lvl_labels, concat_lvl_bbox_targets def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges, num_points_per_lvl): """Compute regression and classification targets for a single image.""" num_points = points.size(0) num_gts = gt_labels.size(0) if num_gts == 0: return gt_labels.new_full((num_points,), self.num_classes), \ gt_bboxes.new_zeros((num_points, 4)) areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( gt_bboxes[:, 3] - gt_bboxes[:, 1]) # TODO: figure out why these two are different # areas = areas[None].expand(num_points, num_gts) areas = areas[None].repeat(num_points, 1) regress_ranges = regress_ranges[:, None, :].expand( num_points, num_gts, 2) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None].expand(num_points, num_gts) ys = ys[:, None].expand(num_points, num_gts) left = xs - gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) if self.center_sampling: # condition1: inside a `center bbox` radius = self.center_sample_radius center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2 center_gts = torch.zeros_like(gt_bboxes) stride = center_xs.new_zeros(center_xs.shape) # project the points on current lvl back to the `original` sizes lvl_begin = 0 for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): lvl_end = lvl_begin + num_points_lvl stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius lvl_begin = lvl_end x_mins = center_xs - stride y_mins = center_ys - stride x_maxs = center_xs + stride y_maxs = center_ys + stride center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0], x_mins, gt_bboxes[..., 0]) center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1], y_mins, gt_bboxes[..., 1]) center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2], gt_bboxes[..., 2], x_maxs) center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3], gt_bboxes[..., 3], y_maxs) cb_dist_left = xs - center_gts[..., 0] cb_dist_right = center_gts[..., 2] - xs cb_dist_top = ys - 
center_gts[..., 1] cb_dist_bottom = center_gts[..., 3] - ys center_bbox = torch.stack( (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 else: # condition1: inside a gt bbox inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 # condition2: limit the regression range for each location max_regress_distance = bbox_targets.max(-1)[0] inside_regress_range = ( (max_regress_distance >= regress_ranges[..., 0]) & (max_regress_distance <= regress_ranges[..., 1])) # if there are still more than one objects for a location, # we choose the one with minimal area areas[inside_gt_bbox_mask == 0] = INF areas[inside_regress_range == 0] = INF min_area, min_area_inds = areas.min(dim=1) labels = gt_labels[min_area_inds] labels[min_area == INF] = self.num_classes # set as BG bbox_targets = bbox_targets[range(num_points), min_area_inds] return labels, bbox_targets def centerness_target(self, pos_bbox_targets): """Compute centerness targets. Args: pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape (num_pos, 4) Returns: Tensor: Centerness target. """ # only calculate pos centerness targets, otherwise there may be nan left_right = pos_bbox_targets[:, [0, 2]] top_bottom = pos_bbox_targets[:, [1, 3]] if len(left_right) == 0: centerness_targets = left_right[..., 0] else: centerness_targets = ( left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) return torch.sqrt(centerness_targets) def _get_points_single(self, featmap_size, stride, dtype, device, flatten=False): """Get points according to feature map size. This function will be deprecated soon. """ warnings.warn( '`_get_points_single` in `FCOSHead` will be ' 'deprecated soon, we support a multi level point generator now' 'you can get points of a single level feature map ' 'with `self.prior_generator.single_level_grid_priors` ') y, x = super()._get_points_single(featmap_size, stride, dtype, device) points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), dim=-1) + stride // 2 return points
19,528
42.015419
113
py
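The FCOS head's own docstring shows the intended forward usage; expanded into a runnable sketch (assuming the standard mmdet package layout), with the same 11 classes, 7 input channels, and one feature map per default stride:

import torch
from mmdet.models.dense_heads import FCOSHead

head = FCOSHead(num_classes=11, in_channels=7)
feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
cls_scores, bbox_preds, centernesses = head(feats)
# One prediction triple per FPN level, one learnable Scale per level.
assert len(cls_scores) == len(head.scales)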
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/lad_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.runner import force_fp32 from mmdet.core import bbox_overlaps, multi_apply from ..builder import HEADS from .paa_head import PAAHead, levels_to_images @HEADS.register_module() class LADHead(PAAHead): """Label Assignment Head from the paper: `Improving Object Detection by Label Assignment Distillation <https://arxiv.org/pdf/2108.10520.pdf>`_""" @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) def get_label_assignment(self, cls_scores, bbox_preds, iou_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Get label assignment (from teacher). Args: cls_scores (list[Tensor]): Box scores for each scale level. Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) iou_preds (list[Tensor]): iou_preds for each scale level with shape (N, num_anchors * 1, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when are computing the loss. Returns: tuple: Returns a tuple containing label assignment variables. - labels (Tensor): Labels of all anchors, each with shape (num_anchors,). - labels_weight (Tensor): Label weights of all anchor. each with shape (num_anchors,). - bboxes_target (Tensor): BBox targets of all anchors. each with shape (num_anchors, 4). - bboxes_weight (Tensor): BBox weights of all anchors. each with shape (num_anchors, 4). - pos_inds_flatten (Tensor): Contains all index of positive sample in all anchor. - pos_anchors (Tensor): Positive anchors. - num_pos (int): Number of positive anchors. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, ) (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, pos_gt_index) = cls_reg_targets cls_scores = levels_to_images(cls_scores) cls_scores = [ item.reshape(-1, self.cls_out_channels) for item in cls_scores ] bbox_preds = levels_to_images(bbox_preds) bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, cls_scores, bbox_preds, labels, labels_weight, bboxes_target, bboxes_weight, pos_inds) with torch.no_grad(): reassign_labels, reassign_label_weight, \ reassign_bbox_weights, num_pos = multi_apply( self.paa_reassign, pos_losses_list, labels, labels_weight, bboxes_weight, pos_inds, pos_gt_index, anchor_list) num_pos = sum(num_pos) # convert all tensor list to a flatten tensor labels = torch.cat(reassign_labels, 0).view(-1) flatten_anchors = torch.cat( [torch.cat(item, 0) for item in anchor_list]) labels_weight = torch.cat(reassign_label_weight, 0).view(-1) bboxes_target = torch.cat(bboxes_target, 0).view(-1, bboxes_target[0].size(-1)) pos_inds_flatten = ((labels >= 0) & (labels < self.num_classes)).nonzero().reshape(-1) if num_pos: pos_anchors = flatten_anchors[pos_inds_flatten] else: pos_anchors = None label_assignment_results = (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten, pos_anchors, num_pos) return label_assignment_results def forward_train(self, x, label_assignment_results, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=None, **kwargs): """Forward train with the available label assignment (student receives from teacher). Args: x (list[Tensor]): Features from FPN. label_assignment_results (tuple): As the outputs defined in the function `self.get_label_assignment`. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). Returns: losses: (dict[str, Tensor]): A dictionary of loss components. """ outs = self(x) if gt_labels is None: loss_inputs = outs + (gt_bboxes, img_metas) else: loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) losses = self.loss( *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore, label_assignment_results=label_assignment_results) return losses @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) def loss(self, cls_scores, bbox_preds, iou_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None, label_assignment_results=None): """Compute losses of the head. 
Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) iou_preds (list[Tensor]): iou_preds for each scale level with shape (N, num_anchors * 1, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when are computing the loss. label_assignment_results (tuple): As the outputs defined in the function `self.get_label_assignment`. Returns: dict[str, Tensor]: A dictionary of loss gmm_assignment. """ (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten, pos_anchors, num_pos) = label_assignment_results cls_scores = levels_to_images(cls_scores) cls_scores = [ item.reshape(-1, self.cls_out_channels) for item in cls_scores ] bbox_preds = levels_to_images(bbox_preds) bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] iou_preds = levels_to_images(iou_preds) iou_preds = [item.reshape(-1, 1) for item in iou_preds] # convert all tensor list to a flatten tensor cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) losses_cls = self.loss_cls( cls_scores, labels, labels_weight, avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0 if num_pos: pos_bbox_pred = self.bbox_coder.decode( pos_anchors, bbox_preds[pos_inds_flatten]) pos_bbox_target = bboxes_target[pos_inds_flatten] iou_target = bbox_overlaps( pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) losses_iou = self.loss_centerness( iou_preds[pos_inds_flatten], iou_target.unsqueeze(-1), avg_factor=num_pos) losses_bbox = self.loss_bbox( pos_bbox_pred, pos_bbox_target, avg_factor=num_pos) else: losses_iou = iou_preds.sum() * 0 losses_bbox = bbox_preds.sum() * 0 return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
10,058
42.171674
79
py
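The LAD head above splits training into two calls: the teacher computes the label assignment, the student reuses it in forward_train. A schematic helper under the assumption that teacher_head and student_head are already-built, compatibly configured LADHead instances and that teacher_feats / student_feats are FPN features of the same images (all names here are illustrative):

def lad_train_step(teacher_head, student_head, teacher_feats, student_feats,
                   img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None):
    """Sketch of Label Assignment Distillation: teacher assigns, student
    trains with the teacher's assignment."""
    # Teacher forward pass: (cls_scores, bbox_preds, iou_preds) per level.
    outs_teacher = teacher_head(teacher_feats)
    label_assignment_results = teacher_head.get_label_assignment(
        *outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
    # Student forward + loss, reusing the teacher's assignment.
    losses = student_head.forward_train(student_feats,
                                        label_assignment_results, img_metas,
                                        gt_bboxes, gt_labels,
                                        gt_bboxes_ignore)
    return losses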
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/yolo_head.py
# Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) 2019 Western Digital Corporation or its affiliates. import warnings import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm, normal_init) from mmcv.runner import force_fp32 from mmdet.core import (build_assigner, build_bbox_coder, build_prior_generator, build_sampler, images_to_levels, multi_apply, multiclass_nms) from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class YOLOV3Head(BaseDenseHead, BBoxTestMixin): """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767. Args: num_classes (int): The number of object classes (w/o background) in_channels (List[int]): Number of input channels per scale. out_channels (List[int]): The number of output channels per scale before the final 1x1 layer. Default: (1024, 512, 256). anchor_generator (dict): Config dict for anchor generator bbox_coder (dict): Config of bounding box coder. featmap_strides (List[int]): The stride of each scale. Should be in descending order. Default: (32, 16, 8). one_hot_smoother (float): Set a non-zero value to enable label-smooth Default: 0. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True) act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). loss_cls (dict): Config of classification loss. loss_conf (dict): Config of confidence loss. loss_xy (dict): Config of xy coordinate loss. loss_wh (dict): Config of wh coordinate loss. train_cfg (dict): Training config of YOLOV3 head. Default: None. test_cfg (dict): Testing config of YOLOV3 head. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, num_classes, in_channels, out_channels=(1024, 512, 256), anchor_generator=dict( type='YOLOAnchorGenerator', base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]], strides=[32, 16, 8]), bbox_coder=dict(type='YOLOBBoxCoder'), featmap_strides=[32, 16, 8], one_hot_smoother=0., conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_conf=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_xy=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_wh=dict(type='MSELoss', loss_weight=1.0), train_cfg=None, test_cfg=None, init_cfg=dict( type='Normal', std=0.01, override=dict(name='convs_pred'))): super(YOLOV3Head, self).__init__(init_cfg) # Check params assert (len(in_channels) == len(out_channels) == len(featmap_strides)) self.num_classes = num_classes self.in_channels = in_channels self.out_channels = out_channels self.featmap_strides = featmap_strides self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) if hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False self.one_hot_smoother = one_hot_smoother self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.bbox_coder = build_bbox_coder(bbox_coder) self.prior_generator = build_prior_generator(anchor_generator) self.loss_cls = build_loss(loss_cls) self.loss_conf = build_loss(loss_conf) self.loss_xy = build_loss(loss_xy) self.loss_wh = build_loss(loss_wh) self.num_base_priors = self.prior_generator.num_base_priors[0] assert len( self.prior_generator.num_base_priors) == len(featmap_strides) self._init_layers() @property def anchor_generator(self): warnings.warn('DeprecationWarning: `anchor_generator` is deprecated, ' 'please use "prior_generator" instead') return self.prior_generator @property def num_anchors(self): """ Returns: int: Number of anchors on each point of feature map. 
""" warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.num_base_priors @property def num_levels(self): return len(self.featmap_strides) @property def num_attrib(self): """int: number of attributes in pred_map, bboxes (4) + objectness (1) + num_classes""" return 5 + self.num_classes def _init_layers(self): self.convs_bridge = nn.ModuleList() self.convs_pred = nn.ModuleList() for i in range(self.num_levels): conv_bridge = ConvModule( self.in_channels[i], self.out_channels[i], 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) conv_pred = nn.Conv2d(self.out_channels[i], self.num_base_priors * self.num_attrib, 1) self.convs_bridge.append(conv_bridge) self.convs_pred.append(conv_pred) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) # Use prior in model initialization to improve stability for conv_pred, stride in zip(self.convs_pred, self.featmap_strides): bias = conv_pred.bias.reshape(self.num_base_priors, -1) # init objectness with prior of 8 objects per feature map # refer to https://github.com/ultralytics/yolov3 nn.init.constant_(bias.data[:, 4], bias_init_with_prob(8 / (608 / stride)**2)) nn.init.constant_(bias.data[:, 5:], bias_init_with_prob(0.01)) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple[Tensor]: A tuple of multi-level predication map, each is a 4D-tensor of shape (batch_size, 5+num_classes, height, width). """ assert len(feats) == self.num_levels pred_maps = [] for i in range(self.num_levels): x = feats[i] x = self.convs_bridge[i](x) pred_map = self.convs_pred[i](x) pred_maps.append(pred_map) return tuple(pred_maps), @force_fp32(apply_to=('pred_maps', )) def get_bboxes(self, pred_maps, img_metas, cfg=None, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. It has been accelerated since PR #5991. Args: pred_maps (list[Tensor]): Raw predictions for a batch of images. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. cfg (mmcv.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is an (n, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. The shape of the second tensor in the tuple is (n,), and each element represents the class label of the corresponding box. 
""" assert len(pred_maps) == self.num_levels cfg = self.test_cfg if cfg is None else cfg scale_factors = [img_meta['scale_factor'] for img_meta in img_metas] num_imgs = len(img_metas) featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=pred_maps[0].device) flatten_preds = [] flatten_strides = [] for pred, stride in zip(pred_maps, self.featmap_strides): pred = pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_attrib) pred[..., :2].sigmoid_() flatten_preds.append(pred) flatten_strides.append( pred.new_tensor(stride).expand(pred.size(1))) flatten_preds = torch.cat(flatten_preds, dim=1) flatten_bbox_preds = flatten_preds[..., :4] flatten_objectness = flatten_preds[..., 4].sigmoid() flatten_cls_scores = flatten_preds[..., 5:].sigmoid() flatten_anchors = torch.cat(mlvl_anchors) flatten_strides = torch.cat(flatten_strides) flatten_bboxes = self.bbox_coder.decode(flatten_anchors, flatten_bbox_preds, flatten_strides.unsqueeze(-1)) if with_nms and (flatten_objectness.size(0) == 0): return torch.zeros((0, 5)), torch.zeros((0, )) if rescale: flatten_bboxes /= flatten_bboxes.new_tensor( scale_factors).unsqueeze(1) padding = flatten_bboxes.new_zeros(num_imgs, flatten_bboxes.shape[1], 1) flatten_cls_scores = torch.cat([flatten_cls_scores, padding], dim=-1) det_results = [] for (bboxes, scores, objectness) in zip(flatten_bboxes, flatten_cls_scores, flatten_objectness): # Filtering out all predictions with conf < conf_thr conf_thr = cfg.get('conf_thr', -1) if conf_thr > 0: conf_inds = objectness >= conf_thr bboxes = bboxes[conf_inds, :] scores = scores[conf_inds, :] objectness = objectness[conf_inds] det_bboxes, det_labels = multiclass_nms( bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=objectness) det_results.append(tuple([det_bboxes, det_labels])) return det_results @force_fp32(apply_to=('pred_maps', )) def loss(self, pred_maps, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: pred_maps (list[Tensor]): Prediction map for each scale level, shape (N, num_anchors * num_attrib, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = len(img_metas) device = pred_maps[0][0].device featmap_sizes = [ pred_maps[i].shape[-2:] for i in range(self.num_levels) ] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) anchor_list = [mlvl_anchors for _ in range(num_imgs)] responsible_flag_list = [] for img_id in range(len(img_metas)): responsible_flag_list.append( self.prior_generator.responsible_flags(featmap_sizes, gt_bboxes[img_id], device)) target_maps_list, neg_maps_list = self.get_targets( anchor_list, responsible_flag_list, gt_bboxes, gt_labels) losses_cls, losses_conf, losses_xy, losses_wh = multi_apply( self.loss_single, pred_maps, target_maps_list, neg_maps_list) return dict( loss_cls=losses_cls, loss_conf=losses_conf, loss_xy=losses_xy, loss_wh=losses_wh) def loss_single(self, pred_map, target_map, neg_map): """Compute loss of a single image from a batch. Args: pred_map (Tensor): Raw predictions for a single level. 
target_map (Tensor): The Ground-Truth target for a single level. neg_map (Tensor): The negative masks for a single level. Returns: tuple: loss_cls (Tensor): Classification loss. loss_conf (Tensor): Confidence loss. loss_xy (Tensor): Regression loss of x, y coordinate. loss_wh (Tensor): Regression loss of w, h coordinate. """ num_imgs = len(pred_map) pred_map = pred_map.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_attrib) neg_mask = neg_map.float() pos_mask = target_map[..., 4] pos_and_neg_mask = neg_mask + pos_mask pos_mask = pos_mask.unsqueeze(dim=-1) if torch.max(pos_and_neg_mask) > 1.: warnings.warn('There is overlap between pos and neg sample.') pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.) pred_xy = pred_map[..., :2] pred_wh = pred_map[..., 2:4] pred_conf = pred_map[..., 4] pred_label = pred_map[..., 5:] target_xy = target_map[..., :2] target_wh = target_map[..., 2:4] target_conf = target_map[..., 4] target_label = target_map[..., 5:] loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask) loss_conf = self.loss_conf( pred_conf, target_conf, weight=pos_and_neg_mask) loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask) loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask) return loss_cls, loss_conf, loss_xy, loss_wh def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list, gt_labels_list): """Compute target maps for anchors in multiple images. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_total_anchors, 4). responsible_flag_list (list[list[Tensor]]): Multi level responsible flags of each image. Each element is a tensor of shape (num_total_anchors, ) gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. gt_labels_list (list[Tensor]): Ground truth labels of each box. Returns: tuple: Usually returns a tuple containing learning targets. - target_map_list (list[Tensor]): Target map of each level. - neg_map_list (list[Tensor]): Negative map of each level. """ num_imgs = len(anchor_list) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] results = multi_apply(self._get_targets_single, anchor_list, responsible_flag_list, gt_bboxes_list, gt_labels_list) all_target_maps, all_neg_maps = results assert num_imgs == len(all_target_maps) == len(all_neg_maps) target_maps_list = images_to_levels(all_target_maps, num_level_anchors) neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors) return target_maps_list, neg_maps_list def _get_targets_single(self, anchors, responsible_flags, gt_bboxes, gt_labels): """Generate matching bounding box prior and converted GT. Args: anchors (list[Tensor]): Multi-level anchors of the image. responsible_flags (list[Tensor]): Multi-level responsible flags of anchors gt_bboxes (Tensor): Ground truth bboxes of single image. gt_labels (Tensor): Ground truth labels of single image. 
Returns: tuple: target_map (Tensor): Predication target map of each scale level, shape (num_total_anchors, 5+num_classes) neg_map (Tensor): Negative map of each scale level, shape (num_total_anchors,) """ anchor_strides = [] for i in range(len(anchors)): anchor_strides.append( torch.tensor(self.featmap_strides[i], device=gt_bboxes.device).repeat(len(anchors[i]))) concat_anchors = torch.cat(anchors) concat_responsible_flags = torch.cat(responsible_flags) anchor_strides = torch.cat(anchor_strides) assert len(anchor_strides) == len(concat_anchors) == \ len(concat_responsible_flags) assign_result = self.assigner.assign(concat_anchors, concat_responsible_flags, gt_bboxes) sampling_result = self.sampler.sample(assign_result, concat_anchors, gt_bboxes) target_map = concat_anchors.new_zeros( concat_anchors.size(0), self.num_attrib) target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, anchor_strides[sampling_result.pos_inds]) target_map[sampling_result.pos_inds, 4] = 1 gt_labels_one_hot = F.one_hot( gt_labels, num_classes=self.num_classes).float() if self.one_hot_smoother != 0: # label smooth gt_labels_one_hot = gt_labels_one_hot * ( 1 - self.one_hot_smoother ) + self.one_hot_smoother / self.num_classes target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[ sampling_result.pos_assigned_gt_inds] neg_map = concat_anchors.new_zeros( concat_anchors.size(0), dtype=torch.uint8) neg_map[sampling_result.neg_inds] = 1 return target_map, neg_map def aug_test(self, feats, img_metas, rescale=False): """Test function with test time augmentation. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. 
Returns: list[ndarray]: bbox results of each class """ return self.aug_test_bboxes(feats, img_metas, rescale=rescale) @force_fp32(apply_to=('pred_maps')) def onnx_export(self, pred_maps, img_metas, with_nms=True): num_levels = len(pred_maps) pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)] cfg = self.test_cfg assert len(pred_maps_list) == self.num_levels device = pred_maps_list[0].device batch_size = pred_maps_list[0].shape[0] featmap_sizes = [ pred_maps_list[i].shape[-2:] for i in range(self.num_levels) ] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) # convert to tensor to keep tracing nms_pre_tensor = torch.tensor( cfg.get('nms_pre', -1), device=device, dtype=torch.long) multi_lvl_bboxes = [] multi_lvl_cls_scores = [] multi_lvl_conf_scores = [] for i in range(self.num_levels): # get some key info for current scale pred_map = pred_maps_list[i] stride = self.featmap_strides[i] # (b,h, w, num_anchors*num_attrib) -> # (b,h*w*num_anchors, num_attrib) pred_map = pred_map.permute(0, 2, 3, 1).reshape(batch_size, -1, self.num_attrib) # Inplace operation like # ```pred_map[..., :2] = \torch.sigmoid(pred_map[..., :2])``` # would create constant tensor when exporting to onnx pred_map_conf = torch.sigmoid(pred_map[..., :2]) pred_map_rest = pred_map[..., 2:] pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1) pred_map_boxes = pred_map[..., :4] multi_lvl_anchor = mlvl_anchors[i] multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes) bbox_pred = self.bbox_coder.decode(multi_lvl_anchor, pred_map_boxes, stride) # conf and cls conf_pred = torch.sigmoid(pred_map[..., 4]) cls_pred = torch.sigmoid(pred_map[..., 5:]).view( batch_size, -1, self.num_classes) # Cls pred one-hot. # Get top-k prediction from mmdet.core.export import get_k_for_topk nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) if nms_pre > 0: _, topk_inds = conf_pred.topk(nms_pre) batch_inds = torch.arange(batch_size).view( -1, 1).expand_as(topk_inds).long() # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 transformed_inds = ( bbox_pred.shape[1] * batch_inds + topk_inds) bbox_pred = bbox_pred.reshape(-1, 4)[transformed_inds, :].reshape( batch_size, -1, 4) cls_pred = cls_pred.reshape( -1, self.num_classes)[transformed_inds, :].reshape( batch_size, -1, self.num_classes) conf_pred = conf_pred.reshape(-1, 1)[transformed_inds].reshape( batch_size, -1) # Save the result of current scale multi_lvl_bboxes.append(bbox_pred) multi_lvl_cls_scores.append(cls_pred) multi_lvl_conf_scores.append(conf_pred) # Merge the results of different scales together batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1) batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1) batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1) # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment from mmdet.core.export import add_dummy_nms_for_onnx conf_thr = cfg.get('conf_thr', -1) score_thr = cfg.get('score_thr', -1) # follow original pipeline of YOLOv3 if conf_thr > 0: mask = (batch_mlvl_conf_scores >= conf_thr).float() batch_mlvl_conf_scores *= mask if score_thr > 0: mask = (batch_mlvl_scores > score_thr).float() batch_mlvl_scores *= mask batch_mlvl_conf_scores = batch_mlvl_conf_scores.unsqueeze(2).expand_as( batch_mlvl_scores) batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_conf_scores if with_nms: max_output_boxes_per_class = cfg.nms.get( 'max_output_boxes_per_class', 200) iou_threshold = cfg.nms.get('iou_threshold', 0.5) # keep 
aligned with original pipeline, improve # mAP by 1% for YOLOv3 in ONNX score_threshold = 0 nms_pre = cfg.get('deploy_nms_pre', -1) return add_dummy_nms_for_onnx( batch_mlvl_bboxes, batch_mlvl_scores, max_output_boxes_per_class, iou_threshold, score_threshold, nms_pre, cfg.max_per_img, ) else: return batch_mlvl_bboxes, batch_mlvl_scores
26,329
41.467742
106
py
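A minimal forward-pass sketch for the YOLOv3 head above, assuming the standard mmdet package layout; the in_channels [512, 256, 128] and the 13/26/52 feature sizes are illustrative values for a typical YOLOv3 neck, not something fixed by the head itself:

import torch
from mmdet.models.dense_heads import YOLOV3Head

head = YOLOV3Head(num_classes=80, in_channels=[512, 256, 128])
feats = [
    torch.rand(1, c, s, s) for c, s in zip([512, 256, 128], [13, 26, 52])
]
# forward() returns a 1-tuple wrapping the per-level prediction maps.
pred_maps = head(feats)[0]
# Each map packs num_base_priors * (5 + num_classes) channels per location.
print([p.shape for p in pred_maps])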
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/centripetal_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule, normal_init from mmcv.ops import DeformConv2d from mmdet.core import multi_apply from ..builder import HEADS, build_loss from .corner_head import CornerHead @HEADS.register_module() class CentripetalHead(CornerHead): """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection. CentripetalHead inherits from :class:`CornerHead`. It removes the embedding branch and adds guiding shift and centripetal shift branches. More details can be found in the `paper <https://arxiv.org/abs/2003.09119>`_ . Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. num_feat_levels (int): Levels of feature from the previous module. 2 for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104 outputs the final feature and intermediate supervision feature and HourglassNet-52 only outputs the final feature. Default: 2. corner_emb_channels (int): Channel of embedding vector. Default: 1. train_cfg (dict | None): Training config. Useless in CornerHead, but we keep this variable for SingleStageDetector. Default: None. test_cfg (dict | None): Testing config of CornerHead. Default: None. loss_heatmap (dict | None): Config of corner heatmap loss. Default: GaussianFocalLoss. loss_embedding (dict | None): Config of corner embedding loss. Default: AssociativeEmbeddingLoss. loss_offset (dict | None): Config of corner offset loss. Default: SmoothL1Loss. loss_guiding_shift (dict): Config of guiding shift loss. Default: SmoothL1Loss. loss_centripetal_shift (dict): Config of centripetal shift loss. Default: SmoothL1Loss. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, *args, centripetal_shift_channels=2, guiding_shift_channels=2, feat_adaption_conv_kernel=3, loss_guiding_shift=dict( type='SmoothL1Loss', beta=1.0, loss_weight=0.05), loss_centripetal_shift=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1), init_cfg=None, **kwargs): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' assert centripetal_shift_channels == 2, ( 'CentripetalHead only support centripetal_shift_channels == 2') self.centripetal_shift_channels = centripetal_shift_channels assert guiding_shift_channels == 2, ( 'CentripetalHead only support guiding_shift_channels == 2') self.guiding_shift_channels = guiding_shift_channels self.feat_adaption_conv_kernel = feat_adaption_conv_kernel super(CentripetalHead, self).__init__( *args, init_cfg=init_cfg, **kwargs) self.loss_guiding_shift = build_loss(loss_guiding_shift) self.loss_centripetal_shift = build_loss(loss_centripetal_shift) def _init_centripetal_layers(self): """Initialize centripetal layers. Including feature adaption deform convs (feat_adaption), deform offset prediction convs (dcn_off), guiding shift (guiding_shift) and centripetal shift ( centripetal_shift). Each branch has two parts: prefix `tl_` for top-left and `br_` for bottom-right. 
""" self.tl_feat_adaption = nn.ModuleList() self.br_feat_adaption = nn.ModuleList() self.tl_dcn_offset = nn.ModuleList() self.br_dcn_offset = nn.ModuleList() self.tl_guiding_shift = nn.ModuleList() self.br_guiding_shift = nn.ModuleList() self.tl_centripetal_shift = nn.ModuleList() self.br_centripetal_shift = nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_feat_adaption.append( DeformConv2d(self.in_channels, self.in_channels, self.feat_adaption_conv_kernel, 1, 1)) self.br_feat_adaption.append( DeformConv2d(self.in_channels, self.in_channels, self.feat_adaption_conv_kernel, 1, 1)) self.tl_guiding_shift.append( self._make_layers( out_channels=self.guiding_shift_channels, in_channels=self.in_channels)) self.br_guiding_shift.append( self._make_layers( out_channels=self.guiding_shift_channels, in_channels=self.in_channels)) self.tl_dcn_offset.append( ConvModule( self.guiding_shift_channels, self.feat_adaption_conv_kernel**2 * self.guiding_shift_channels, 1, bias=False, act_cfg=None)) self.br_dcn_offset.append( ConvModule( self.guiding_shift_channels, self.feat_adaption_conv_kernel**2 * self.guiding_shift_channels, 1, bias=False, act_cfg=None)) self.tl_centripetal_shift.append( self._make_layers( out_channels=self.centripetal_shift_channels, in_channels=self.in_channels)) self.br_centripetal_shift.append( self._make_layers( out_channels=self.centripetal_shift_channels, in_channels=self.in_channels)) def _init_layers(self): """Initialize layers for CentripetalHead. Including two parts: CornerHead layers and CentripetalHead layers """ super()._init_layers() # using _init_layers in CornerHead self._init_centripetal_layers() def init_weights(self): super(CentripetalHead, self).init_weights() for i in range(self.num_feat_levels): normal_init(self.tl_feat_adaption[i], std=0.01) normal_init(self.br_feat_adaption[i], std=0.01) normal_init(self.tl_dcn_offset[i].conv, std=0.1) normal_init(self.br_dcn_offset[i].conv, std=0.1) _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]] _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]] _ = [ x.conv.reset_parameters() for x in self.tl_centripetal_shift[i] ] _ = [ x.conv.reset_parameters() for x in self.br_centripetal_shift[i] ] def forward_single(self, x, lvl_ind): """Forward feature of a single level. Args: x (Tensor): Feature of a single level. lvl_ind (int): Level index of current feature. Returns: tuple[Tensor]: A tuple of CentripetalHead's output for current feature level. Containing the following Tensors: - tl_heat (Tensor): Predicted top-left corner heatmap. - br_heat (Tensor): Predicted bottom-right corner heatmap. - tl_off (Tensor): Predicted top-left offset heatmap. - br_off (Tensor): Predicted bottom-right offset heatmap. - tl_guiding_shift (Tensor): Predicted top-left guiding shift heatmap. - br_guiding_shift (Tensor): Predicted bottom-right guiding shift heatmap. - tl_centripetal_shift (Tensor): Predicted top-left centripetal shift heatmap. - br_centripetal_shift (Tensor): Predicted bottom-right centripetal shift heatmap. 
""" tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super( ).forward_single( x, lvl_ind, return_pool=True) tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool) br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool) tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach()) br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach()) tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool, tl_dcn_offset) br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool, br_dcn_offset) tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind]( tl_feat_adaption) br_centripetal_shift = self.br_centripetal_shift[lvl_ind]( br_feat_adaption) result_list = [ tl_heat, br_heat, tl_off, br_off, tl_guiding_shift, br_guiding_shift, tl_centripetal_shift, br_centripetal_shift ] return result_list def loss(self, tl_heats, br_heats, tl_offs, br_offs, tl_guiding_shifts, br_guiding_shifts, tl_centripetal_shifts, br_centripetal_shifts, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each level with shape (N, guiding_shift_channels, H, W). br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for each level with shape (N, guiding_shift_channels, H, W). tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shifts (list[Tensor]): Bottom-right centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [left, top, right, bottom] format. gt_labels (list[Tensor]): Class indices corresponding to each box. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. Containing the following losses: - det_loss (list[Tensor]): Corner keypoint losses of all feature levels. - off_loss (list[Tensor]): Corner offset losses of all feature levels. - guiding_loss (list[Tensor]): Guiding shift losses of all feature levels. - centripetal_loss (list[Tensor]): Centripetal shift losses of all feature levels. 
""" targets = self.get_targets( gt_bboxes, gt_labels, tl_heats[-1].shape, img_metas[0]['pad_shape'], with_corner_emb=self.with_corner_emb, with_guiding_shift=True, with_centripetal_shift=True) mlvl_targets = [targets for _ in range(self.num_feat_levels)] [det_losses, off_losses, guiding_losses, centripetal_losses ] = multi_apply(self.loss_single, tl_heats, br_heats, tl_offs, br_offs, tl_guiding_shifts, br_guiding_shifts, tl_centripetal_shifts, br_centripetal_shifts, mlvl_targets) loss_dict = dict( det_loss=det_losses, off_loss=off_losses, guiding_loss=guiding_losses, centripetal_loss=centripetal_losses) return loss_dict def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift, br_guiding_shift, tl_centripetal_shift, br_centripetal_shift, targets): """Compute losses for single level. Args: tl_hmp (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_hmp (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). tl_guiding_shift (Tensor): Top-left guiding shift for current level with shape (N, guiding_shift_channels, H, W). br_guiding_shift (Tensor): Bottom-right guiding shift for current level with shape (N, guiding_shift_channels, H, W). tl_centripetal_shift (Tensor): Top-left centripetal shift for current level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shift (Tensor): Bottom-right centripetal shift for current level with shape (N, centripetal_shift_channels, H, W). targets (dict): Corner target generated by `get_targets`. Returns: tuple[torch.Tensor]: Losses of the head's different branches containing the following losses: - det_loss (Tensor): Corner keypoint loss. - off_loss (Tensor): Corner offset loss. - guiding_loss (Tensor): Guiding shift loss. - centripetal_loss (Tensor): Centripetal shift loss. """ targets['corner_embedding'] = None det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None, None, tl_off, br_off, targets) gt_tl_guiding_shift = targets['topleft_guiding_shift'] gt_br_guiding_shift = targets['bottomright_guiding_shift'] gt_tl_centripetal_shift = targets['topleft_centripetal_shift'] gt_br_centripetal_shift = targets['bottomright_centripetal_shift'] gt_tl_heatmap = targets['topleft_heatmap'] gt_br_heatmap = targets['bottomright_heatmap'] # We only compute the offset loss at the real corner position. # The value of real corner would be 1 in heatmap ground truth. # The mask is computed in class agnostic mode and its shape is # batch * 1 * width * height. 
tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_tl_heatmap) br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_br_heatmap) # Guiding shift loss tl_guiding_loss = self.loss_guiding_shift( tl_guiding_shift, gt_tl_guiding_shift, tl_mask, avg_factor=tl_mask.sum()) br_guiding_loss = self.loss_guiding_shift( br_guiding_shift, gt_br_guiding_shift, br_mask, avg_factor=br_mask.sum()) guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0 # Centripetal shift loss tl_centripetal_loss = self.loss_centripetal_shift( tl_centripetal_shift, gt_tl_centripetal_shift, tl_mask, avg_factor=tl_mask.sum()) br_centripetal_loss = self.loss_centripetal_shift( br_centripetal_shift, gt_br_centripetal_shift, br_mask, avg_factor=br_mask.sum()) centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0 return det_loss, off_loss, guiding_loss, centripetal_loss def get_bboxes(self, tl_heats, br_heats, tl_offs, br_offs, tl_guiding_shifts, br_guiding_shifts, tl_centripetal_shifts, br_centripetal_shifts, img_metas, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each level with shape (N, guiding_shift_channels, H, W). Useless in this function, we keep this arg because it's the raw output from CentripetalHead. br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for each level with shape (N, guiding_shift_channels, H, W). Useless in this function, we keep this arg because it's the raw output from CentripetalHead. tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shifts (list[Tensor]): Bottom-right centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. """ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas) result_list = [] for img_id in range(len(img_metas)): result_list.append( self._get_bboxes_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], img_metas[img_id], tl_emb=None, br_emb=None, tl_centripetal_shift=tl_centripetal_shifts[-1][ img_id:img_id + 1, :], br_centripetal_shift=br_centripetal_shifts[-1][ img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) return result_list
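# --- Editor's illustrative sketch, not part of the original file ---
# loss_single above builds a class-agnostic corner mask from the ground
# truth heatmap: a location counts as a real corner only if some class
# channel equals exactly 1.  The toy tensor below reproduces that masking
# step so the intermediate shapes are easy to inspect; every size here is
# an invented demo value.
import torch


def _demo_corner_mask():
    num_classes, h, w = 3, 4, 4
    gt_heatmap = torch.zeros(1, num_classes, h, w)
    gt_heatmap[0, 1, 2, 3] = 1.0  # a real corner of class 1 at (y=2, x=3)
    gt_heatmap[0, 2, 0, 0] = 0.7  # gaussian tail, not a real corner
    # same expression as in loss_single: (batch, 1, h, w), 1 at corners
    mask = gt_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(gt_heatmap)
    assert mask.shape == (1, 1, h, w) and mask.sum() == 1
    return mask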
19,811
45.28972
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/paa_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmcv.runner import force_fp32 from mmdet.core import multi_apply, multiclass_nms from mmdet.core.bbox.iou_calculators import bbox_overlaps from mmdet.models import HEADS from mmdet.models.dense_heads import ATSSHead EPS = 1e-12 try: import sklearn.mixture as skm except ImportError: skm = None def levels_to_images(mlvl_tensor): """Concat multi-level feature maps by image. [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] Convert the shape of each element in mlvl_tensor from (N, C, H, W) to (N, H*W , C), then split the element to N elements with shape (H*W, C), and concat elements in same image of all level along first dimension. Args: mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from corresponding level. Each element is of shape (N, C, H, W) Returns: list[torch.Tensor]: A list that contains N tensors and each tensor is of shape (num_elements, C) """ batch_size = mlvl_tensor[0].size(0) batch_list = [[] for _ in range(batch_size)] channels = mlvl_tensor[0].size(1) for t in mlvl_tensor: t = t.permute(0, 2, 3, 1) t = t.view(batch_size, -1, channels).contiguous() for img in range(batch_size): batch_list[img].append(t[img]) return [torch.cat(item, 0) for item in batch_list] @HEADS.register_module() class PAAHead(ATSSHead): """Head of PAAAssignment: Probabilistic Anchor Assignment with IoU Prediction for Object Detection. Code is modified from the `official github repo <https://github.com/kkhoot/PAA/blob/master/paa_core /modeling/rpn/paa/loss.py>`_. More details can be found in the `paper <https://arxiv.org/abs/2007.08103>`_ . Args: topk (int): Select topk samples with smallest loss in each level. score_voting (bool): Whether to use score voting in post-process. covariance_type : String describing the type of covariance parameters to be used in :class:`sklearn.mixture.GaussianMixture`. It must be one of: - 'full': each component has its own general covariance matrix - 'tied': all components share the same general covariance matrix - 'diag': each component has its own diagonal covariance matrix - 'spherical': each component has its own single variance Default: 'diag'. From 'full' to 'spherical', the gmm fitting process is faster yet the performance could be influenced. For most cases, 'diag' should be a good choice. """ def __init__(self, *args, topk=9, score_voting=True, covariance_type='diag', **kwargs): # topk used in paa reassign process self.topk = topk self.with_score_voting = score_voting self.covariance_type = covariance_type super(PAAHead, self).__init__(*args, **kwargs) @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) def loss(self, cls_scores, bbox_preds, iou_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) iou_preds (list[Tensor]): iou_preds for each scale level with shape (N, num_anchors * 1, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. 
gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when are computing the loss. Returns: dict[str, Tensor]: A dictionary of loss gmm_assignment. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, ) (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, pos_gt_index) = cls_reg_targets cls_scores = levels_to_images(cls_scores) cls_scores = [ item.reshape(-1, self.cls_out_channels) for item in cls_scores ] bbox_preds = levels_to_images(bbox_preds) bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] iou_preds = levels_to_images(iou_preds) iou_preds = [item.reshape(-1, 1) for item in iou_preds] pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, cls_scores, bbox_preds, labels, labels_weight, bboxes_target, bboxes_weight, pos_inds) with torch.no_grad(): reassign_labels, reassign_label_weight, \ reassign_bbox_weights, num_pos = multi_apply( self.paa_reassign, pos_losses_list, labels, labels_weight, bboxes_weight, pos_inds, pos_gt_index, anchor_list) num_pos = sum(num_pos) # convert all tensor list to a flatten tensor cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) labels = torch.cat(reassign_labels, 0).view(-1) flatten_anchors = torch.cat( [torch.cat(item, 0) for item in anchor_list]) labels_weight = torch.cat(reassign_label_weight, 0).view(-1) bboxes_target = torch.cat(bboxes_target, 0).view(-1, bboxes_target[0].size(-1)) pos_inds_flatten = ((labels >= 0) & (labels < self.num_classes)).nonzero().reshape(-1) losses_cls = self.loss_cls( cls_scores, labels, labels_weight, avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0 if num_pos: pos_bbox_pred = self.bbox_coder.decode( flatten_anchors[pos_inds_flatten], bbox_preds[pos_inds_flatten]) pos_bbox_target = bboxes_target[pos_inds_flatten] iou_target = bbox_overlaps( pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) losses_iou = self.loss_centerness( iou_preds[pos_inds_flatten], iou_target.unsqueeze(-1), avg_factor=num_pos) losses_bbox = self.loss_bbox( pos_bbox_pred, pos_bbox_target, iou_target.clamp(min=EPS), avg_factor=iou_target.sum()) else: losses_iou = iou_preds.sum() * 0 losses_bbox = bbox_preds.sum() * 0 return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou) def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight, bbox_target, bbox_weight, pos_inds): """Calculate loss of all potential positive samples obtained from first match process. Args: anchors (list[Tensor]): Anchors of each scale. cls_score (Tensor): Box scores of single image with shape (num_anchors, num_classes) bbox_pred (Tensor): Box energies / deltas of single image with shape (num_anchors, 4) label (Tensor): classification target of each anchor with shape (num_anchors,) label_weight (Tensor): Classification loss weight of each anchor with shape (num_anchors). bbox_target (dict): Regression target of each anchor with shape (num_anchors, 4). 
bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). pos_inds (Tensor): Index of all positive samples got from first assign process. Returns: Tensor: Losses of all positive samples in single image. """ if not len(pos_inds): return cls_score.new([]), anchors_all_level = torch.cat(anchors, 0) pos_scores = cls_score[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_label = label[pos_inds] pos_label_weight = label_weight[pos_inds] pos_bbox_target = bbox_target[pos_inds] pos_bbox_weight = bbox_weight[pos_inds] pos_anchors = anchors_all_level[pos_inds] pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred) # to keep loss dimension loss_cls = self.loss_cls( pos_scores, pos_label, pos_label_weight, avg_factor=self.loss_cls.loss_weight, reduction_override='none') loss_bbox = self.loss_bbox( pos_bbox_pred, pos_bbox_target, pos_bbox_weight, avg_factor=self.loss_bbox.loss_weight, reduction_override='none') loss_cls = loss_cls.sum(-1) pos_loss = loss_bbox + loss_cls return pos_loss, def paa_reassign(self, pos_losses, label, label_weight, bbox_weight, pos_inds, pos_gt_inds, anchors): """Fit loss to GMM distribution and separate positive, ignore, negative samples again with GMM model. Args: pos_losses (Tensor): Losses of all positive samples in single image. label (Tensor): classification target of each anchor with shape (num_anchors,) label_weight (Tensor): Classification loss weight of each anchor with shape (num_anchors). bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). pos_inds (Tensor): Index of all positive samples got from first assign process. pos_gt_inds (Tensor): Gt_index of all positive samples got from first assign process. anchors (list[Tensor]): Anchors of each scale. Returns: tuple: Usually returns a tuple containing learning targets. - label (Tensor): classification target of each anchor after paa assign, with shape (num_anchors,) - label_weight (Tensor): Classification loss weight of each anchor after paa assign, with shape (num_anchors). - bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). - num_pos (int): The number of positive samples after paa assign. 
""" if not len(pos_inds): return label, label_weight, bbox_weight, 0 label = label.clone() label_weight = label_weight.clone() bbox_weight = bbox_weight.clone() num_gt = pos_gt_inds.max() + 1 num_level = len(anchors) num_anchors_each_level = [item.size(0) for item in anchors] num_anchors_each_level.insert(0, 0) inds_level_interval = np.cumsum(num_anchors_each_level) pos_level_mask = [] for i in range(num_level): mask = (pos_inds >= inds_level_interval[i]) & ( pos_inds < inds_level_interval[i + 1]) pos_level_mask.append(mask) pos_inds_after_paa = [label.new_tensor([])] ignore_inds_after_paa = [label.new_tensor([])] for gt_ind in range(num_gt): pos_inds_gmm = [] pos_loss_gmm = [] gt_mask = pos_gt_inds == gt_ind for level in range(num_level): level_mask = pos_level_mask[level] level_gt_mask = level_mask & gt_mask value, topk_inds = pos_losses[level_gt_mask].topk( min(level_gt_mask.sum(), self.topk), largest=False) pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds]) pos_loss_gmm.append(value) pos_inds_gmm = torch.cat(pos_inds_gmm) pos_loss_gmm = torch.cat(pos_loss_gmm) # fix gmm need at least two sample if len(pos_inds_gmm) < 2: continue device = pos_inds_gmm.device pos_loss_gmm, sort_inds = pos_loss_gmm.sort() pos_inds_gmm = pos_inds_gmm[sort_inds] pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy() min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max() means_init = np.array([min_loss, max_loss]).reshape(2, 1) weights_init = np.array([0.5, 0.5]) precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full if self.covariance_type == 'spherical': precisions_init = precisions_init.reshape(2) elif self.covariance_type == 'diag': precisions_init = precisions_init.reshape(2, 1) elif self.covariance_type == 'tied': precisions_init = np.array([[1.0]]) if skm is None: raise ImportError('Please run "pip install sklearn" ' 'to install sklearn first.') gmm = skm.GaussianMixture( 2, weights_init=weights_init, means_init=means_init, precisions_init=precisions_init, covariance_type=self.covariance_type) gmm.fit(pos_loss_gmm) gmm_assignment = gmm.predict(pos_loss_gmm) scores = gmm.score_samples(pos_loss_gmm) gmm_assignment = torch.from_numpy(gmm_assignment).to(device) scores = torch.from_numpy(scores).to(device) pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme( gmm_assignment, scores, pos_inds_gmm) pos_inds_after_paa.append(pos_inds_temp) ignore_inds_after_paa.append(ignore_inds_temp) pos_inds_after_paa = torch.cat(pos_inds_after_paa) ignore_inds_after_paa = torch.cat(ignore_inds_after_paa) reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1) reassign_ids = pos_inds[reassign_mask] label[reassign_ids] = self.num_classes label_weight[ignore_inds_after_paa] = 0 bbox_weight[reassign_ids] = 0 num_pos = len(pos_inds_after_paa) return label, label_weight, bbox_weight, num_pos def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm): """A general separation scheme for gmm model. It separates a GMM distribution of candidate samples into three parts, 0 1 and uncertain areas, and you can implement other separation schemes by rewriting this function. Args: gmm_assignment (Tensor): The prediction of GMM which is of shape (num_samples,). The 0/1 value indicates the distribution that each sample comes from. scores (Tensor): The probability of sample coming from the fit GMM distribution. The tensor is of shape (num_samples,). pos_inds_gmm (Tensor): All the indexes of samples which are used to fit GMM model. 
The tensor is of shape (num_samples,) Returns: tuple[Tensor]: The indices of positive and ignored samples. - pos_inds_temp (Tensor): Indices of positive samples. - ignore_inds_temp (Tensor): Indices of ignore samples. """ # The implementation is (c) in Fig.3 in origin paper instead of (b). # You can refer to issues such as # https://github.com/kkhoot/PAA/issues/8 and # https://github.com/kkhoot/PAA/issues/9. fgs = gmm_assignment == 0 pos_inds_temp = fgs.new_tensor([], dtype=torch.long) ignore_inds_temp = fgs.new_tensor([], dtype=torch.long) if fgs.nonzero().numel(): _, pos_thr_ind = scores[fgs].topk(1) pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1] ignore_inds_temp = pos_inds_gmm.new_tensor([]) return pos_inds_temp, ignore_inds_temp def get_targets( self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True, ): """Get targets for PAA head. This method is almost the same as `AnchorHead.get_targets()`. We direct return the results from _get_targets_single instead map it to levels by images_to_levels function. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_labels_list (list[Tensor]): Ground truth labels of each box. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: Usually returns a tuple containing learning targets. - labels (list[Tensor]): Labels of all anchors, each with shape (num_anchors,). - label_weights (list[Tensor]): Label weights of all anchor. each with shape (num_anchors,). - bbox_targets (list[Tensor]): BBox targets of all anchors. each with shape (num_anchors, 4). - bbox_weights (list[Tensor]): BBox weights of all anchors. each with shape (num_anchors, 4). - pos_inds (list[Tensor]): Contains all index of positive sample in all anchor. - gt_inds (list[Tensor]): Contains all gt_index of positive sample in all anchor. """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs concat_anchor_list = [] concat_valid_flag_list = [] for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) concat_anchor_list.append(torch.cat(anchor_list[i])) concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] results = multi_apply( self._get_targets_single, concat_anchor_list, concat_valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds, valid_neg_inds, sampling_result) = results # Due to valid flag of anchors, we have to calculate the real pos_inds # in origin anchor set. 
pos_inds = [] for i, single_labels in enumerate(labels): pos_mask = (0 <= single_labels) & ( single_labels < self.num_classes) pos_inds.append(pos_mask.nonzero().view(-1)) gt_inds = [item.pos_assigned_gt_inds for item in sampling_result] return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, gt_inds) def _get_targets_single(self, flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. This method is same as `AnchorHead._get_targets_single()`. """ assert unmap_outputs, 'We must map outputs back to the original' \ 'set of anchors in PAAhead' return super(ATSSHead, self)._get_targets_single( flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def get_bboxes(self, cls_scores, bbox_preds, score_factors=None, img_metas=None, cfg=None, rescale=False, with_nms=True, **kwargs): assert with_nms, 'PAA only supports "with_nms=True" now and it ' \ 'means PAAHead does not support ' \ 'test-time augmentation' return super(ATSSHead, self).get_bboxes(cls_scores, bbox_preds, score_factors, img_metas, cfg, rescale, with_nms, **kwargs) def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factors from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. 
""" cfg = self.test_cfg if cfg is None else cfg img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_score_factors = [] for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() if 0 < nms_pre < scores.shape[0]: max_scores, _ = (scores * score_factor[:, None]).sqrt().max(dim=1) _, topk_inds = max_scores.topk(nms_pre) priors = priors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] score_factor = score_factor[topk_inds] bboxes = self.bbox_coder.decode( priors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_score_factors.append(score_factor) return self._bbox_post_process(mlvl_scores, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale, with_nms, mlvl_score_factors, **kwargs) def _bbox_post_process(self, mlvl_scores, mlvl_bboxes, scale_factor, cfg, rescale=False, with_nms=True, mlvl_score_factors=None, **kwargs): """bbox post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Usually with_nms is False is used for aug test. Args: mlvl_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_bboxes, num_class). mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale levels of a single image, each item has shape (num_bboxes, 4). scale_factor (ndarray, optional): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. mlvl_score_factors (list[Tensor], optional): Score factor from all scale levels of a single image, each item has shape (num_bboxes, ). Default: None. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. 
""" mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) mlvl_iou_preds = torch.cat(mlvl_score_factors) mlvl_nms_scores = (mlvl_scores * mlvl_iou_preds[:, None]).sqrt() det_bboxes, det_labels = multiclass_nms( mlvl_bboxes, mlvl_nms_scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=None) if self.with_score_voting and len(det_bboxes) > 0: det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels, mlvl_bboxes, mlvl_nms_scores, cfg.score_thr) return det_bboxes, det_labels def score_voting(self, det_bboxes, det_labels, mlvl_bboxes, mlvl_nms_scores, score_thr): """Implementation of score voting method works on each remaining boxes after NMS procedure. Args: det_bboxes (Tensor): Remaining boxes after NMS procedure, with shape (k, 5), each dimension means (x1, y1, x2, y2, score). det_labels (Tensor): The label of remaining boxes, with shape (k, 1),Labels are 0-based. mlvl_bboxes (Tensor): All boxes before the NMS procedure, with shape (num_anchors,4). mlvl_nms_scores (Tensor): The scores of all boxes which is used in the NMS procedure, with shape (num_anchors, num_class) score_thr (float): The score threshold of bboxes. Returns: tuple: Usually returns a tuple containing voting results. - det_bboxes_voted (Tensor): Remaining boxes after score voting procedure, with shape (k, 5), each dimension means (x1, y1, x2, y2, score). - det_labels_voted (Tensor): Label of remaining bboxes after voting, with shape (num_anchors,). """ candidate_mask = mlvl_nms_scores > score_thr candidate_mask_nonzeros = candidate_mask.nonzero(as_tuple=False) candidate_inds = candidate_mask_nonzeros[:, 0] candidate_labels = candidate_mask_nonzeros[:, 1] candidate_bboxes = mlvl_bboxes[candidate_inds] candidate_scores = mlvl_nms_scores[candidate_mask] det_bboxes_voted = [] det_labels_voted = [] for cls in range(self.cls_out_channels): candidate_cls_mask = candidate_labels == cls if not candidate_cls_mask.any(): continue candidate_cls_scores = candidate_scores[candidate_cls_mask] candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask] det_cls_mask = det_labels == cls det_cls_bboxes = det_bboxes[det_cls_mask].view( -1, det_bboxes.size(-1)) det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4], candidate_cls_bboxes) for det_ind in range(len(det_cls_bboxes)): single_det_ious = det_candidate_ious[det_ind] pos_ious_mask = single_det_ious > 0.01 pos_ious = single_det_ious[pos_ious_mask] pos_bboxes = candidate_cls_bboxes[pos_ious_mask] pos_scores = candidate_cls_scores[pos_ious_mask] pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) * pos_scores)[:, None] voted_box = torch.sum( pis * pos_bboxes, dim=0) / torch.sum( pis, dim=0) voted_score = det_cls_bboxes[det_ind][-1:][None, :] det_bboxes_voted.append( torch.cat((voted_box[None, :], voted_score), dim=1)) det_labels_voted.append(cls) det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0) det_labels_voted = det_labels.new_tensor(det_labels_voted) return det_bboxes_voted, det_labels_voted
34,046
43.976222
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/retina_sepbn_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init from ..builder import HEADS from .anchor_head import AnchorHead @HEADS.register_module() class RetinaSepBNHead(AnchorHead): """"RetinaHead with separate BN. In RetinaHead, conv/norm layers are shared across different FPN levels, while in RetinaSepBNHead, conv layers are shared across different FPN levels, but BN layers are separated. """ def __init__(self, num_classes, num_ins, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, init_cfg=None, **kwargs): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.num_ins = num_ins super(RetinaSepBNHead, self).__init__( num_classes, in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.num_ins): cls_convs = nn.ModuleList() reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.cls_convs.append(cls_convs) self.reg_convs.append(reg_convs) for i in range(self.stacked_convs): for j in range(1, self.num_ins): self.cls_convs[j][i].conv = self.cls_convs[0][i].conv self.reg_convs[j][i].conv = self.reg_convs[0][i].conv self.retina_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.retina_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) def init_weights(self): """Initialize weights of the head.""" super(RetinaSepBNHead, self).init_weights() for m in self.cls_convs[0]: normal_init(m.conv, std=0.01) for m in self.reg_convs[0]: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.retina_cls, std=0.01, bias=bias_cls) normal_init(self.retina_reg, std=0.01) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. """ cls_scores = [] bbox_preds = [] for i, x in enumerate(feats): cls_feat = feats[i] reg_feat = feats[i] for cls_conv in self.cls_convs[i]: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs[i]: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_pred = self.retina_reg(reg_feat) cls_scores.append(cls_score) bbox_preds.append(bbox_pred) return cls_scores, bbox_preds
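# --- Editor's illustrative sketch, not part of the original file ---
# The key trick in _init_layers above is that the conv of every level j
# is re-pointed at the conv of level 0 (self.cls_convs[j][i].conv =
# self.cls_convs[0][i].conv) while each level keeps its own BN layer and
# running statistics.  The plain-torch toy below shows the same sharing
# pattern; the layer sizes are arbitrary demo values.
import torch
import torch.nn as nn


def _demo_shared_conv_separate_bn(num_levels=3, channels=8):
    convs = nn.ModuleList(
        nn.Conv2d(channels, channels, 3, padding=1)
        for _ in range(num_levels))
    bns = nn.ModuleList(
        nn.BatchNorm2d(channels) for _ in range(num_levels))
    # share conv parameters across levels, keep BN layers separate
    for j in range(1, num_levels):
        convs[j] = convs[0]
    x = torch.randn(2, channels, 16, 16)
    outs = [bns[j](convs[j](x)) for j in range(num_levels)]
    # all levels now use the same conv weights ...
    assert convs[1].weight.data_ptr() == convs[0].weight.data_ptr()
    # ... but each level tracks its own BN statistics
    assert bns[0] is not bns[1]
    return outs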
4,566
37.378151
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/anchor_free_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from abc import abstractmethod import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import force_fp32 from mmdet.core import build_bbox_coder, multi_apply from mmdet.core.anchor.point_generator import MlvlPointGenerator from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class AnchorFreeHead(BaseDenseHead, BBoxTestMixin): """Anchor-free head (FCOS, Fovea, RepPoints, etc.). Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. stacked_convs (int): Number of stacking convs of the head. strides (tuple): Downsample factor of each feature map. dcn_on_last_conv (bool): If true, use dcn in the last layer of towers. Default: False. conv_bias (bool | str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Default: "auto". loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. bbox_coder (dict): Config of bbox coder. Defaults 'DistancePointBBoxCoder'. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. init_cfg (dict or list[dict], optional): Initialization config dict. """ # noqa: W605 _version = 1 def __init__(self, num_classes, in_channels, feat_channels=256, stacked_convs=4, strides=(4, 8, 16, 32, 64), dcn_on_last_conv=False, conv_bias='auto', loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='IoULoss', loss_weight=1.0), bbox_coder=dict(type='DistancePointBBoxCoder'), conv_cfg=None, norm_cfg=None, train_cfg=None, test_cfg=None, init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01))): super(AnchorFreeHead, self).__init__(init_cfg) self.num_classes = num_classes self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.dcn_on_last_conv = dcn_on_last_conv assert conv_bias == 'auto' or isinstance(conv_bias, bool) self.conv_bias = conv_bias self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.bbox_coder = build_bbox_coder(bbox_coder) self.prior_generator = MlvlPointGenerator(strides) # In order to keep a more general interface and be consistent with # anchor_head. 
We can think of point like one anchor self.num_base_priors = self.prior_generator.num_base_priors[0] self.train_cfg = train_cfg self.test_cfg = test_cfg self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self._init_layers() def _init_layers(self): """Initialize layers of the head.""" self._init_cls_convs() self._init_reg_convs() self._init_predictor() def _init_cls_convs(self): """Initialize classification conv layers of the head.""" self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias)) def _init_reg_convs(self): """Initialize bbox regression conv layers of the head.""" self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias)) def _init_predictor(self): """Initialize predictor layers of the head.""" self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): """Hack some keys of the model state dict so that can load checkpoints of previous version.""" version = local_metadata.get('version', None) if version is None: # the key is different in early versions # for example, 'fcos_cls' become 'conv_cls' now bbox_head_keys = [ k for k in state_dict.keys() if k.startswith(prefix) ] ori_predictor_keys = [] new_predictor_keys = [] # e.g. 'fcos_cls' or 'fcos_reg' for key in bbox_head_keys: ori_predictor_keys.append(key) key = key.split('.') conv_name = None if key[1].endswith('cls'): conv_name = 'conv_cls' elif key[1].endswith('reg'): conv_name = 'conv_reg' elif key[1].endswith('centerness'): conv_name = 'conv_centerness' else: assert NotImplementedError if conv_name is not None: key[1] = conv_name new_predictor_keys.append('.'.join(key)) else: ori_predictor_keys.pop(-1) for i in range(len(new_predictor_keys)): state_dict[new_predictor_keys[i]] = state_dict.pop( ori_predictor_keys[i]) super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually contain classification scores and bbox predictions. cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. """ return multi_apply(self.forward_single, feats)[:2] def forward_single(self, x): """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. 
Returns: tuple: Scores for each class, bbox predictions, features after classification and regression conv layers, some models needs these features like FCOS. """ cls_feat = x reg_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.conv_cls(cls_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.conv_reg(reg_feat) return cls_score, bbox_pred, cls_feat, reg_feat @abstractmethod @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. """ raise NotImplementedError @abstractmethod def get_targets(self, points, gt_bboxes_list, gt_labels_list): """Compute regression, classification and centerness targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels_list (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). """ raise NotImplementedError def _get_points_single(self, featmap_size, stride, dtype, device, flatten=False): """Get points of a single scale level. This function will be deprecated soon. """ warnings.warn( '`_get_points_single` in `AnchorFreeHead` will be ' 'deprecated soon, we support a multi level point generator now' 'you can get points of a single level feature map ' 'with `self.prior_generator.single_level_grid_priors` ') h, w = featmap_size # First create Range with the default dtype, than convert to # target `dtype` for onnx exporting. x_range = torch.arange(w, device=device).to(dtype) y_range = torch.arange(h, device=device).to(dtype) y, x = torch.meshgrid(y_range, x_range) if flatten: y = y.flatten() x = x.flatten() return y, x def get_points(self, featmap_sizes, dtype, device, flatten=False): """Get points according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. dtype (torch.dtype): Type of points. device (torch.device): Device of points. Returns: tuple: points of each image. """ warnings.warn( '`get_points` in `AnchorFreeHead` will be ' 'deprecated soon, we support a multi level point generator now' 'you can get points of all levels ' 'with `self.prior_generator.grid_priors` ') mlvl_points = [] for i in range(len(featmap_sizes)): mlvl_points.append( self._get_points_single(featmap_sizes[i], self.strides[i], dtype, device, flatten)) return mlvl_points def aug_test(self, feats, img_metas, rescale=False): """Test function with test time augmentation. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) 
and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[ndarray]: bbox results of each class """ return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
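# --- Editor's illustrative sketch, not part of the original file ---
# _get_points_single above (deprecated in favour of self.prior_generator,
# see the warning in its body) lays a regular grid of prior points over
# one feature map.  The snippet below rebuilds the same meshgrid with
# plain torch and adds the half-stride offset that subclasses such as
# FCOS use to place each point at the centre of its cell; featmap_size
# and stride are invented demo values.
import torch


def _demo_grid_points(featmap_size=(4, 5), stride=8, device='cpu'):
    h, w = featmap_size
    x_range = torch.arange(w, device=device).float()
    y_range = torch.arange(h, device=device).float()
    y, x = torch.meshgrid(y_range, x_range)
    points = torch.stack(
        (x.flatten() * stride + stride // 2,
         y.flatten() * stride + stride // 2), dim=-1)
    assert points.shape == (h * w, 2)
    return points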
13,958
38.769231
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/pisa_ssd_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core import multi_apply from ..builder import HEADS from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p from .ssd_head import SSDHead # TODO: add loss evaluator for SSD @HEADS.register_module() class PISASSDHead(SSDHead): def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes of each image with shape (num_obj, 4). gt_labels (list[Tensor]): Ground truth labels of each image with shape (num_obj, 4). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. Default: None. Returns: dict: Loss dict, comprise classification loss regression loss and carl loss. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=1, unmap_outputs=False, return_sampling_results=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets num_images = len(img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) isr_cfg = self.train_cfg.get('isr', None) all_targets = (all_labels.view(-1), all_label_weights.view(-1), all_bbox_targets.view(-1, 4), all_bbox_weights.view(-1, 4)) # apply ISR-P if isr_cfg is not None: all_targets = isr_p( all_cls_scores.view(-1, all_cls_scores.size(-1)), all_bbox_preds.view(-1, 4), all_targets, torch.cat(all_anchors), sampling_results_list, loss_cls=CrossEntropyLoss(), bbox_coder=self.bbox_coder, **self.train_cfg.isr, num_class=self.num_classes) (new_labels, new_label_weights, new_bbox_targets, new_bbox_weights) = all_targets all_labels = new_labels.view(all_labels.shape) all_label_weights = new_label_weights.view(all_label_weights.shape) all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape) all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape) # add CARL loss carl_loss_cfg = self.train_cfg.get('carl', None) if carl_loss_cfg is not None: loss_carl = carl_loss( all_cls_scores.view(-1, all_cls_scores.size(-1)), all_targets[0], all_bbox_preds.view(-1, 4), all_targets[2], SmoothL1Loss(beta=1.), **self.train_cfg.carl, avg_factor=num_total_pos, num_class=self.num_classes) 
# check NaN and Inf assert torch.isfinite(all_cls_scores).all().item(), \ 'classification scores become infinite or NaN!' assert torch.isfinite(all_bbox_preds).all().item(), \ 'bbox predications become infinite or NaN!' losses_cls, losses_bbox = multi_apply( self.loss_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos) loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) if carl_loss_cfg is not None: loss_dict.update(loss_carl) return loss_dict
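# --- Editor's illustrative sketch, not part of the original file ---
# The loss above first flattens every scale level from shape
# (N, num_anchors * C, H, W) to (N, H * W * num_anchors, C) and then
# concatenates the levels, so that ISR-P and the CARL loss can work on
# one long list of anchors per image.  The toy reshape below shows that
# flattening step on fake score maps; all sizes are invented demo values.
import torch


def _demo_flatten_levels(num_images=2, num_anchors=3, num_classes=4):
    featmap_sizes = [(8, 8), (4, 4)]  # two fake FPN levels
    cls_scores = [
        torch.randn(num_images, num_anchors * num_classes, h, w)
        for h, w in featmap_sizes
    ]
    all_cls_scores = torch.cat([
        s.permute(0, 2, 3, 1).reshape(num_images, -1, num_classes)
        for s in cls_scores
    ], 1)
    expected = sum(h * w * num_anchors for h, w in featmap_sizes)
    assert all_cls_scores.shape == (num_images, expected, num_classes)
    return all_cls_scores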
5,598
38.70922
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/base_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule class BaseMaskHead(BaseModule, metaclass=ABCMeta): """Base class for mask heads used in One-Stage Instance Segmentation.""" def __init__(self, init_cfg): super(BaseMaskHead, self).__init__(init_cfg) @abstractmethod def loss(self, **kwargs): pass @abstractmethod def get_results(self, **kwargs): """Get precessed :obj:`InstanceData` of multiple images.""" pass def forward_train(self, x, gt_labels, gt_masks, img_metas, gt_bboxes=None, gt_bboxes_ignore=None, positive_infos=None, **kwargs): """ Args: x (list[Tensor] | tuple[Tensor]): Features from FPN. Each has a shape (B, C, H, W). gt_labels (list[Tensor]): Ground truth labels of all images. each has a shape (num_gts,). gt_masks (list[Tensor]) : Masks for each bbox, has a shape (num_gts, h , w). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes (list[Tensor]): Ground truth bboxes of the image, each item has a shape (num_gts, 4). gt_bboxes_ignore (list[Tensor], None): Ground truth bboxes to be ignored, each item has a shape (num_ignored_gts, 4). positive_infos (list[:obj:`InstanceData`], optional): Information of positive samples. Used when the label assignment is done outside the MaskHead, e.g., in BboxHead in YOLACT or CondInst, etc. When the label assignment is done in MaskHead, it would be None, like SOLO. All values in it should have shape (num_positive_samples, *). Returns: dict[str, Tensor]: A dictionary of loss components. """ if positive_infos is None: outs = self(x) else: outs = self(x, positive_infos) assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \ 'even if only one item is returned' loss = self.loss( *outs, gt_labels=gt_labels, gt_masks=gt_masks, img_metas=img_metas, gt_bboxes=gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, positive_infos=positive_infos, **kwargs) return loss def simple_test(self, feats, img_metas, rescale=False, instances_list=None, **kwargs): """Test function without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. instances_list (list[obj:`InstanceData`], optional): Detection results of each image after the post process. Only exist if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc. Returns: list[obj:`InstanceData`]: Instance segmentation \ results of each image after the post process. \ Each item usually contains following keys. \ - scores (Tensor): Classification scores, has a shape (num_instance,) - labels (Tensor): Has a shape (num_instances,). - masks (Tensor): Processed mask results, has a shape (num_instances, h, w). """ if instances_list is None: outs = self(feats) else: outs = self(feats, instances_list=instances_list) mask_inputs = outs + (img_metas, ) results_list = self.get_results( *mask_inputs, rescale=rescale, instances_list=instances_list, **kwargs) return results_list def onnx_export(self, img, img_metas): raise NotImplementedError(f'{self.__class__.__name__} does ' f'not support ONNX EXPORT')
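# --- Editor's illustrative sketch, not part of the original file ---
# BaseMaskHead only fixes the training / testing entry points; concrete
# heads must implement forward(), loss() and get_results().  The skeleton
# below is a minimal do-nothing subclass that satisfies that interface so
# the control flow of forward_train() and simple_test() can be traced.
# It is a hypothetical example, not a usable mask head.
class _DummyMaskHead(BaseMaskHead):

    def __init__(self):
        super(_DummyMaskHead, self).__init__(init_cfg=None)

    def forward(self, feats, positive_infos=None):
        # a real head would predict mask maps from the FPN features here
        return (feats, )

    def loss(self, *outs, gt_labels, gt_masks, img_metas, **kwargs):
        # a real head would compute mask / classification losses here
        return dict(loss_mask=sum(o.sum() * 0 for o in outs[0]))

    def get_results(self, *mask_inputs, rescale=False, **kwargs):
        # a real head would post-process predictions into InstanceData
        return []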
4,539
37.803419
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/solo_head.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmdet.core import InstanceData, mask_matrix_nms, multi_apply from mmdet.core.utils import center_of_mass, generate_coordinate from mmdet.models.builder import HEADS, build_loss from .base_mask_head import BaseMaskHead @HEADS.register_module() class SOLOHead(BaseMaskHead): """SOLO mask head used in `SOLO: Segmenting Objects by Locations. <https://arxiv.org/abs/1912.04488>`_ Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. Default: 256. stacked_convs (int): Number of stacking convs of the head. Default: 4. strides (tuple): Downsample factor of each feature map. scale_ranges (tuple[tuple[int, int]]): Area range of multiple level masks, in the format [(min1, max1), (min2, max2), ...]. A range of (16, 64) means the area range between (16, 64). pos_scale (float): Constant scale factor to control the center region. num_grids (list[int]): Divided image into a uniform grids, each feature map has a different grid value. The number of output channels is grid ** 2. Default: [40, 36, 24, 16, 12]. cls_down_index (int): The index of downsample operation in classification branch. Default: 0. loss_mask (dict): Config of mask loss. loss_cls (dict): Config of classification loss. norm_cfg (dict): dictionary to construct and config norm layer. Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). train_cfg (dict): Training config of head. test_cfg (dict): Testing config of head. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__( self, num_classes, in_channels, feat_channels=256, stacked_convs=4, strides=(4, 8, 16, 32, 64), scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), pos_scale=0.2, num_grids=[40, 36, 24, 16, 12], cls_down_index=0, loss_mask=None, loss_cls=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), train_cfg=None, test_cfg=None, init_cfg=[ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], ): super(SOLOHead, self).__init__(init_cfg) self.num_classes = num_classes self.cls_out_channels = self.num_classes self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.num_grids = num_grids # number of FPN feats self.num_levels = len(strides) assert self.num_levels == len(scale_ranges) == len(num_grids) self.scale_ranges = scale_ranges self.pos_scale = pos_scale self.cls_down_index = cls_down_index self.loss_cls = build_loss(loss_cls) self.loss_mask = build_loss(loss_mask) self.norm_cfg = norm_cfg self.init_cfg = init_cfg self.train_cfg = train_cfg self.test_cfg = test_cfg self._init_layers() def _init_layers(self): self.mask_convs = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels + 2 if i == 0 else self.feat_channels self.mask_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.conv_mask_list = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list.append( nn.Conv2d(self.feat_channels, num_grid**2, 1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def resize_feats(self, feats): """Downsample the first feat and upsample last feat in feats.""" out = [] for i in range(len(feats)): if i == 0: out.append( F.interpolate(feats[0], scale_factor=0.5, mode='bilinear')) elif i == len(feats) - 1: out.append( F.interpolate( feats[i], size=feats[i - 1].shape[-2:], mode='bilinear')) else: out.append(feats[i]) return out def forward(self, feats): assert len(feats) == self.num_levels feats = self.resize_feats(feats) mlvl_mask_preds = [] mlvl_cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat = torch.cat([mask_feat, coord_feat], 1) for mask_layer in (self.mask_convs): mask_feat = mask_layer(mask_feat) mask_feat = F.interpolate( mask_feat, scale_factor=2, mode='bilinear') mask_pred = self.conv_mask_list[i](mask_feat) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_pred = F.interpolate( mask_pred.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mlvl_mask_preds.append(mask_pred) 
mlvl_cls_preds.append(cls_pred) return mlvl_mask_preds, mlvl_cls_preds def loss(self, mlvl_mask_preds, mlvl_cls_preds, gt_labels, gt_masks, img_metas, gt_bboxes=None, **kwargs): """Calculate the loss of total batch. Args: mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. Each element in the list has shape (batch_size, num_grids**2 ,h ,w). mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). gt_labels (list[Tensor]): Labels of multiple images. gt_masks (list[Tensor]): Ground truth masks of multiple images. Each has shape (num_instances, h, w). img_metas (list[dict]): Meta information of multiple images. gt_bboxes (list[Tensor]): Ground truth bboxes of multiple images. Default: None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_levels = self.num_levels num_imgs = len(gt_labels) featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds] # `BoolTensor` in `pos_masks` represent # whether the corresponding point is # positive pos_mask_targets, labels, pos_masks = multi_apply( self._get_targets_single, gt_bboxes, gt_labels, gt_masks, featmap_sizes=featmap_sizes) # change from the outside list meaning multi images # to the outside list meaning multi levels mlvl_pos_mask_targets = [[] for _ in range(num_levels)] mlvl_pos_mask_preds = [[] for _ in range(num_levels)] mlvl_pos_masks = [[] for _ in range(num_levels)] mlvl_labels = [[] for _ in range(num_levels)] for img_id in range(num_imgs): assert num_levels == len(pos_mask_targets[img_id]) for lvl in range(num_levels): mlvl_pos_mask_targets[lvl].append( pos_mask_targets[img_id][lvl]) mlvl_pos_mask_preds[lvl].append( mlvl_mask_preds[lvl][img_id, pos_masks[img_id][lvl], ...]) mlvl_pos_masks[lvl].append(pos_masks[img_id][lvl].flatten()) mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) # cat multiple image temp_mlvl_cls_preds = [] for lvl in range(num_levels): mlvl_pos_mask_targets[lvl] = torch.cat( mlvl_pos_mask_targets[lvl], dim=0) mlvl_pos_mask_preds[lvl] = torch.cat( mlvl_pos_mask_preds[lvl], dim=0) mlvl_pos_masks[lvl] = torch.cat(mlvl_pos_masks[lvl], dim=0) mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) num_pos = sum(item.sum() for item in mlvl_pos_masks) # dice loss loss_mask = [] for pred, target in zip(mlvl_pos_mask_preds, mlvl_pos_mask_targets): if pred.size()[0] == 0: loss_mask.append(pred.sum().unsqueeze(0)) continue loss_mask.append( self.loss_mask(pred, target, reduction_override='none')) if num_pos > 0: loss_mask = torch.cat(loss_mask).sum() / num_pos else: loss_mask = torch.cat(loss_mask).mean() flatten_labels = torch.cat(mlvl_labels) flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) loss_cls = self.loss_cls( flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) return dict(loss_mask=loss_mask, loss_cls=loss_cls) def _get_targets_single(self, gt_bboxes, gt_labels, gt_masks, featmap_sizes=None): """Compute targets for predictions of single image. Args: gt_bboxes (Tensor): Ground truth bbox of each instance, shape (num_gts, 4). gt_labels (Tensor): Ground truth label of each instance, shape (num_gts,). gt_masks (Tensor): Ground truth mask of each instance, shape (num_gts, h, w). featmap_sizes (list[:obj:`torch.size`]): Size of each feature map from feature pyramid, each element means (feat_h, feat_w). Default: None. Returns: Tuple: Usually returns a tuple containing targets for predictions. 
- mlvl_pos_mask_targets (list[Tensor]): Each element represent the binary mask targets for positive points in this level, has shape (num_pos, out_h, out_w). - mlvl_labels (list[Tensor]): Each element is classification labels for all points in this level, has shape (num_grid, num_grid). - mlvl_pos_masks (list[Tensor]): Each element is a `BoolTensor` to represent whether the corresponding point in single level is positive, has shape (num_grid **2). """ device = gt_labels.device gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) mlvl_pos_mask_targets = [] mlvl_labels = [] mlvl_pos_masks = [] for (lower_bound, upper_bound), stride, featmap_size, num_grid \ in zip(self.scale_ranges, self.strides, featmap_sizes, self.num_grids): mask_target = torch.zeros( [num_grid**2, featmap_size[0], featmap_size[1]], dtype=torch.uint8, device=device) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes labels = torch.zeros([num_grid, num_grid], dtype=torch.int64, device=device) + self.num_classes pos_mask = torch.zeros([num_grid**2], dtype=torch.bool, device=device) gt_inds = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(gt_inds) == 0: mlvl_pos_mask_targets.append( mask_target.new_zeros(0, featmap_size[0], featmap_size[1])) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) continue hit_gt_bboxes = gt_bboxes[gt_inds] hit_gt_labels = gt_labels[gt_inds] hit_gt_masks = gt_masks[gt_inds, ...] pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] - hit_gt_bboxes[:, 0]) * self.pos_scale pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] - hit_gt_bboxes[:, 1]) * self.pos_scale # Make sure hit_gt_masks has a value valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0 output_stride = stride / 2 for gt_mask, gt_label, pos_h_range, pos_w_range, \ valid_mask_flag in \ zip(hit_gt_masks, hit_gt_labels, pos_h_ranges, pos_w_ranges, valid_mask_flags): if not valid_mask_flag: continue upsampled_size = (featmap_sizes[0][0] * 4, featmap_sizes[0][1] * 4) center_h, center_w = center_of_mass(gt_mask) coord_w = int( (center_w / upsampled_size[1]) // (1. / num_grid)) coord_h = int( (center_h / upsampled_size[0]) // (1. / num_grid)) # left, top, right, down top_box = max( 0, int(((center_h - pos_h_range) / upsampled_size[0]) // (1. / num_grid))) down_box = min( num_grid - 1, int(((center_h + pos_h_range) / upsampled_size[0]) // (1. / num_grid))) left_box = max( 0, int(((center_w - pos_w_range) / upsampled_size[1]) // (1. / num_grid))) right_box = min( num_grid - 1, int(((center_w + pos_w_range) / upsampled_size[1]) // (1. / num_grid))) top = max(top_box, coord_h - 1) down = min(down_box, coord_h + 1) left = max(coord_w - 1, left_box) right = min(right_box, coord_w + 1) labels[top:(down + 1), left:(right + 1)] = gt_label # ins gt_mask = np.uint8(gt_mask.cpu().numpy()) # Follow the original implementation, F.interpolate is # different from cv2 and opencv gt_mask = mmcv.imrescale(gt_mask, scale=1. / output_stride) gt_mask = torch.from_numpy(gt_mask).to(device=device) for i in range(top, down + 1): for j in range(left, right + 1): index = int(i * num_grid + j) mask_target[index, :gt_mask.shape[0], :gt_mask. shape[1]] = gt_mask pos_mask[index] = True mlvl_pos_mask_targets.append(mask_target[pos_mask]) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) return mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks def get_results(self, mlvl_mask_preds, mlvl_cls_scores, img_metas, **kwargs): """Get multi-image mask results. 
Args: mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. Each element in the list has shape (batch_size, num_grids**2 ,h ,w). mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). img_metas (list[dict]): Meta information of all images. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ mlvl_cls_scores = [ item.permute(0, 2, 3, 1) for item in mlvl_cls_scores ] assert len(mlvl_mask_preds) == len(mlvl_cls_scores) num_levels = len(mlvl_cls_scores) results_list = [] for img_id in range(len(img_metas)): cls_pred_list = [ mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels) for lvl in range(num_levels) ] mask_pred_list = [ mlvl_mask_preds[lvl][img_id] for lvl in range(num_levels) ] cls_pred_list = torch.cat(cls_pred_list, dim=0) mask_pred_list = torch.cat(mask_pred_list, dim=0) results = self._get_results_single( cls_pred_list, mask_pred_list, img_meta=img_metas[img_id]) results_list.append(results) return results_list def _get_results_single(self, cls_scores, mask_preds, img_meta, cfg=None): """Get processed mask related results of single image. Args: cls_scores (Tensor): Classification score of all points in single image, has shape (num_points, num_classes). mask_preds (Tensor): Mask prediction of all points in single image, has shape (num_points, feat_h, feat_w). img_meta (dict): Meta information of corresponding image. cfg (dict, optional): Config used in test phase. Default: None. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ def empty_results(results, cls_scores): """Generate a empty results.""" results.scores = cls_scores.new_ones(0) results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) results.labels = cls_scores.new_ones(0) return results cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(mask_preds) results = InstanceData(img_meta) featmap_size = mask_preds.size()[-2:] img_shape = results.img_shape ori_shape = results.ori_shape h, w, _ = img_shape upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) score_mask = (cls_scores > cfg.score_thr) cls_scores = cls_scores[score_mask] if len(cls_scores) == 0: return empty_results(results, cls_scores) inds = score_mask.nonzero() cls_labels = inds[:, 1] # Filter the mask mask with an area is smaller than # stride of corresponding feature level lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0) strides = cls_scores.new_ones(lvl_interval[-1]) strides[:lvl_interval[0]] *= self.strides[0] for lvl in range(1, self.num_levels): strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= self.strides[lvl] strides = strides[inds[:, 0]] mask_preds = mask_preds[inds[:, 0]] masks = mask_preds > cfg.mask_thr sum_masks = masks.sum((1, 2)).float() keep = sum_masks > strides if keep.sum() == 0: return empty_results(results, cls_scores) masks = masks[keep] mask_preds = mask_preds[keep] sum_masks = sum_masks[keep] cls_scores = cls_scores[keep] cls_labels = cls_labels[keep] # maskness. 
mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks cls_scores *= mask_scores scores, labels, _, keep_inds = mask_matrix_nms( masks, cls_labels, cls_scores, mask_area=sum_masks, nms_pre=cfg.nms_pre, max_num=cfg.max_per_img, kernel=cfg.kernel, sigma=cfg.sigma, filter_thr=cfg.filter_thr) mask_preds = mask_preds[keep_inds] mask_preds = F.interpolate( mask_preds.unsqueeze(0), size=upsampled_size, mode='bilinear')[:, :, :h, :w] mask_preds = F.interpolate( mask_preds, size=ori_shape[:2], mode='bilinear').squeeze(0) masks = mask_preds > cfg.mask_thr results.masks = masks results.labels = labels results.scores = scores return results @HEADS.register_module() class DecoupledSOLOHead(SOLOHead): """Decoupled SOLO mask head used in `SOLO: Segmenting Objects by Locations. <https://arxiv.org/abs/1912.04488>`_ Args: init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, *args, init_cfg=[ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_x')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_y')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], **kwargs): super(DecoupledSOLOHead, self).__init__( *args, init_cfg=init_cfg, **kwargs) def _init_layers(self): self.mask_convs_x = nn.ModuleList() self.mask_convs_y = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels + 1 if i == 0 else self.feat_channels self.mask_convs_x.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.mask_convs_y.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.conv_mask_list_x = nn.ModuleList() self.conv_mask_list_y = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list_x.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_mask_list_y.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, feats): assert len(feats) == self.num_levels feats = self.resize_feats(feats) mask_preds_x = [] mask_preds_y = [] cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat_x = torch.cat([mask_feat, coord_feat[:, 0:1, ...]], 1) mask_feat_y = torch.cat([mask_feat, coord_feat[:, 1:2, ...]], 1) for mask_layer_x, mask_layer_y in \ zip(self.mask_convs_x, self.mask_convs_y): mask_feat_x = mask_layer_x(mask_feat_x) mask_feat_y = mask_layer_y(mask_feat_y) mask_feat_x = F.interpolate( mask_feat_x, scale_factor=2, mode='bilinear') mask_feat_y = F.interpolate( mask_feat_y, scale_factor=2, mode='bilinear') mask_pred_x = self.conv_mask_list_x[i](mask_feat_x) mask_pred_y = self.conv_mask_list_y[i](mask_feat_y) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_pred_x = 
F.interpolate( mask_pred_x.sigmoid(), size=upsampled_size, mode='bilinear') mask_pred_y = F.interpolate( mask_pred_y.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mask_preds_x.append(mask_pred_x) mask_preds_y.append(mask_pred_y) cls_preds.append(cls_pred) return mask_preds_x, mask_preds_y, cls_preds def loss(self, mlvl_mask_preds_x, mlvl_mask_preds_y, mlvl_cls_preds, gt_labels, gt_masks, img_metas, gt_bboxes=None, **kwargs): """Calculate the loss of total batch. Args: mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). gt_labels (list[Tensor]): Labels of multiple images. gt_masks (list[Tensor]): Ground truth masks of multiple images. Each has shape (num_instances, h, w). img_metas (list[dict]): Meta information of multiple images. gt_bboxes (list[Tensor]): Ground truth bboxes of multiple images. Default: None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_levels = self.num_levels num_imgs = len(gt_labels) featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds_x] pos_mask_targets, labels, \ xy_pos_indexes = \ multi_apply(self._get_targets_single, gt_bboxes, gt_labels, gt_masks, featmap_sizes=featmap_sizes) # change from the outside list meaning multi images # to the outside list meaning multi levels mlvl_pos_mask_targets = [[] for _ in range(num_levels)] mlvl_pos_mask_preds_x = [[] for _ in range(num_levels)] mlvl_pos_mask_preds_y = [[] for _ in range(num_levels)] mlvl_labels = [[] for _ in range(num_levels)] for img_id in range(num_imgs): for lvl in range(num_levels): mlvl_pos_mask_targets[lvl].append( pos_mask_targets[img_id][lvl]) mlvl_pos_mask_preds_x[lvl].append( mlvl_mask_preds_x[lvl][img_id, xy_pos_indexes[img_id][lvl][:, 1]]) mlvl_pos_mask_preds_y[lvl].append( mlvl_mask_preds_y[lvl][img_id, xy_pos_indexes[img_id][lvl][:, 0]]) mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) # cat multiple image temp_mlvl_cls_preds = [] for lvl in range(num_levels): mlvl_pos_mask_targets[lvl] = torch.cat( mlvl_pos_mask_targets[lvl], dim=0) mlvl_pos_mask_preds_x[lvl] = torch.cat( mlvl_pos_mask_preds_x[lvl], dim=0) mlvl_pos_mask_preds_y[lvl] = torch.cat( mlvl_pos_mask_preds_y[lvl], dim=0) mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) num_pos = 0. 
# dice loss loss_mask = [] for pred_x, pred_y, target in \ zip(mlvl_pos_mask_preds_x, mlvl_pos_mask_preds_y, mlvl_pos_mask_targets): num_masks = pred_x.size(0) if num_masks == 0: # make sure can get grad loss_mask.append((pred_x.sum() + pred_y.sum()).unsqueeze(0)) continue num_pos += num_masks pred_mask = pred_y.sigmoid() * pred_x.sigmoid() loss_mask.append( self.loss_mask(pred_mask, target, reduction_override='none')) if num_pos > 0: loss_mask = torch.cat(loss_mask).sum() / num_pos else: loss_mask = torch.cat(loss_mask).mean() # cate flatten_labels = torch.cat(mlvl_labels) flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) loss_cls = self.loss_cls( flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) return dict(loss_mask=loss_mask, loss_cls=loss_cls) def _get_targets_single(self, gt_bboxes, gt_labels, gt_masks, featmap_sizes=None): """Compute targets for predictions of single image. Args: gt_bboxes (Tensor): Ground truth bbox of each instance, shape (num_gts, 4). gt_labels (Tensor): Ground truth label of each instance, shape (num_gts,). gt_masks (Tensor): Ground truth mask of each instance, shape (num_gts, h, w). featmap_sizes (list[:obj:`torch.size`]): Size of each feature map from feature pyramid, each element means (feat_h, feat_w). Default: None. Returns: Tuple: Usually returns a tuple containing targets for predictions. - mlvl_pos_mask_targets (list[Tensor]): Each element represent the binary mask targets for positive points in this level, has shape (num_pos, out_h, out_w). - mlvl_labels (list[Tensor]): Each element is classification labels for all points in this level, has shape (num_grid, num_grid). - mlvl_xy_pos_indexes (list[Tensor]): Each element in the list contains the index of positive samples in corresponding level, has shape (num_pos, 2), last dimension 2 present (index_x, index_y). """ mlvl_pos_mask_targets, mlvl_labels, \ mlvl_pos_masks = \ super()._get_targets_single(gt_bboxes, gt_labels, gt_masks, featmap_sizes=featmap_sizes) mlvl_xy_pos_indexes = [(item - self.num_classes).nonzero() for item in mlvl_labels] return mlvl_pos_mask_targets, mlvl_labels, mlvl_xy_pos_indexes def get_results(self, mlvl_mask_preds_x, mlvl_mask_preds_y, mlvl_cls_scores, img_metas, rescale=None, **kwargs): """Get multi-image mask results. Args: mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes ,num_grids ,num_grids). img_metas (list[dict]): Meta information of all images. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). 
""" mlvl_cls_scores = [ item.permute(0, 2, 3, 1) for item in mlvl_cls_scores ] assert len(mlvl_mask_preds_x) == len(mlvl_cls_scores) num_levels = len(mlvl_cls_scores) results_list = [] for img_id in range(len(img_metas)): cls_pred_list = [ mlvl_cls_scores[i][img_id].view( -1, self.cls_out_channels).detach() for i in range(num_levels) ] mask_pred_list_x = [ mlvl_mask_preds_x[i][img_id] for i in range(num_levels) ] mask_pred_list_y = [ mlvl_mask_preds_y[i][img_id] for i in range(num_levels) ] cls_pred_list = torch.cat(cls_pred_list, dim=0) mask_pred_list_x = torch.cat(mask_pred_list_x, dim=0) mask_pred_list_y = torch.cat(mask_pred_list_y, dim=0) results = self._get_results_single( cls_pred_list, mask_pred_list_x, mask_pred_list_y, img_meta=img_metas[img_id], cfg=self.test_cfg) results_list.append(results) return results_list def _get_results_single(self, cls_scores, mask_preds_x, mask_preds_y, img_meta, cfg): """Get processed mask related results of single image. Args: cls_scores (Tensor): Classification score of all points in single image, has shape (num_points, num_classes). mask_preds_x (Tensor): Mask prediction of x branch of all points in single image, has shape (sum_num_grids, feat_h, feat_w). mask_preds_y (Tensor): Mask prediction of y branch of all points in single image, has shape (sum_num_grids, feat_h, feat_w). img_meta (dict): Meta information of corresponding image. cfg (dict): Config used in test phase. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ def empty_results(results, cls_scores): """Generate a empty results.""" results.scores = cls_scores.new_ones(0) results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) results.labels = cls_scores.new_ones(0) return results cfg = self.test_cfg if cfg is None else cfg results = InstanceData(img_meta) img_shape = results.img_shape ori_shape = results.ori_shape h, w, _ = img_shape featmap_size = mask_preds_x.size()[-2:] upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) score_mask = (cls_scores > cfg.score_thr) cls_scores = cls_scores[score_mask] inds = score_mask.nonzero() lvl_interval = inds.new_tensor(self.num_grids).pow(2).cumsum(0) num_all_points = lvl_interval[-1] lvl_start_index = inds.new_ones(num_all_points) num_grids = inds.new_ones(num_all_points) seg_size = inds.new_tensor(self.num_grids).cumsum(0) mask_lvl_start_index = inds.new_ones(num_all_points) strides = inds.new_ones(num_all_points) lvl_start_index[:lvl_interval[0]] *= 0 mask_lvl_start_index[:lvl_interval[0]] *= 0 num_grids[:lvl_interval[0]] *= self.num_grids[0] strides[:lvl_interval[0]] *= self.strides[0] for lvl in range(1, self.num_levels): lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ lvl_interval[lvl - 1] mask_lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ seg_size[lvl - 1] num_grids[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ self.num_grids[lvl] strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ self.strides[lvl] lvl_start_index = lvl_start_index[inds[:, 0]] mask_lvl_start_index = mask_lvl_start_index[inds[:, 0]] num_grids = num_grids[inds[:, 0]] strides = strides[inds[:, 0]] y_lvl_offset = (inds[:, 0] - lvl_start_index) // num_grids x_lvl_offset = (inds[:, 0] - lvl_start_index) % num_grids y_inds = mask_lvl_start_index + y_lvl_offset x_inds = mask_lvl_start_index + 
x_lvl_offset cls_labels = inds[:, 1] mask_preds = mask_preds_x[x_inds, ...] * mask_preds_y[y_inds, ...] masks = mask_preds > cfg.mask_thr sum_masks = masks.sum((1, 2)).float() keep = sum_masks > strides if keep.sum() == 0: return empty_results(results, cls_scores) masks = masks[keep] mask_preds = mask_preds[keep] sum_masks = sum_masks[keep] cls_scores = cls_scores[keep] cls_labels = cls_labels[keep] # maskness. mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks cls_scores *= mask_scores scores, labels, _, keep_inds = mask_matrix_nms( masks, cls_labels, cls_scores, mask_area=sum_masks, nms_pre=cfg.nms_pre, max_num=cfg.max_per_img, kernel=cfg.kernel, sigma=cfg.sigma, filter_thr=cfg.filter_thr) mask_preds = mask_preds[keep_inds] mask_preds = F.interpolate( mask_preds.unsqueeze(0), size=upsampled_size, mode='bilinear')[:, :, :h, :w] mask_preds = F.interpolate( mask_preds, size=ori_shape[:2], mode='bilinear').squeeze(0) masks = mask_preds > cfg.mask_thr results.masks = masks results.labels = labels results.scores = scores return results @HEADS.register_module() class DecoupledSOLOLightHead(DecoupledSOLOHead): """Decoupled Light SOLO mask head used in `SOLO: Segmenting Objects by Locations <https://arxiv.org/abs/1912.04488>`_ Args: with_dcn (bool): Whether use dcn in mask_convs and cls_convs, default: False. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, *args, dcn_cfg=None, init_cfg=[ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_x')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_y')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], **kwargs): assert dcn_cfg is None or isinstance(dcn_cfg, dict) self.dcn_cfg = dcn_cfg super(DecoupledSOLOLightHead, self).__init__( *args, init_cfg=init_cfg, **kwargs) def _init_layers(self): self.mask_convs = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): if self.dcn_cfg is not None\ and i == self.stacked_convs - 1: conv_cfg = self.dcn_cfg else: conv_cfg = None chn = self.in_channels + 2 if i == 0 else self.feat_channels self.mask_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) self.conv_mask_list_x = nn.ModuleList() self.conv_mask_list_y = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list_x.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_mask_list_y.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, feats): assert len(feats) == self.num_levels feats = self.resize_feats(feats) mask_preds_x = [] mask_preds_y = [] cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat = torch.cat([mask_feat, coord_feat], 1) for mask_layer in self.mask_convs: mask_feat = mask_layer(mask_feat) mask_feat = F.interpolate( mask_feat, scale_factor=2, mode='bilinear') mask_pred_x = self.conv_mask_list_x[i](mask_feat) mask_pred_y = self.conv_mask_list_y[i](mask_feat) # cls branch for j, cls_layer in 
enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_pred_x = F.interpolate( mask_pred_x.sigmoid(), size=upsampled_size, mode='bilinear') mask_pred_y = F.interpolate( mask_pred_y.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mask_preds_x.append(mask_pred_x) mask_preds_y.append(mask_pred_y) cls_preds.append(cls_pred) return mask_preds_x, mask_preds_y, cls_preds
47,265
39.123939
79
py
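The SOLOHead defined above predicts one mask per grid cell and one classification map per FPN level. The sketch below is a minimal, hedged usage example: the DiceLoss/FocalLoss settings and the feature sizes are illustrative assumptions rather than prescribed values, and the head is built through the same HEADS registry the file imports.

import torch
from mmdet.models.builder import HEADS

solo_head = HEADS.build(
    dict(
        type='SOLOHead',
        num_classes=80,
        in_channels=256,
        # assumed loss configs, for illustration only
        loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0)))
solo_head.train()

# Five FPN levels with batch size 1; resize_feats halves the first level and
# upsamples the last one before the mask/cls branches run.
feats = [torch.rand(1, 256, 64 // 2**i, 64 // 2**i) for i in range(5)]
mlvl_mask_preds, mlvl_cls_preds = solo_head(feats)
# mlvl_mask_preds[i]: (1, num_grids[i]**2, h_i, w_i), where h_i/w_i are twice
#                     the resized feature size of level i
# mlvl_cls_preds[i]:  (1, num_classes, num_grids[i], num_grids[i])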
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/embedding_rpn_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.runner import BaseModule from mmdet.models.builder import HEADS from ...core import bbox_cxcywh_to_xyxy @HEADS.register_module() class EmbeddingRPNHead(BaseModule): """RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ . Unlike traditional RPNHead, this module does not need FPN input, but just decode `init_proposal_bboxes` and expand the first dimension of `init_proposal_bboxes` and `init_proposal_features` to the batch_size. Args: num_proposals (int): Number of init_proposals. Default 100. proposal_feature_channel (int): Channel number of init_proposal_feature. Defaults to 256. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_proposals=100, proposal_feature_channel=256, init_cfg=None, **kwargs): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super(EmbeddingRPNHead, self).__init__(init_cfg) self.num_proposals = num_proposals self.proposal_feature_channel = proposal_feature_channel self._init_layers() def _init_layers(self): """Initialize a sparse set of proposal boxes and proposal features.""" self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4) self.init_proposal_features = nn.Embedding( self.num_proposals, self.proposal_feature_channel) def init_weights(self): """Initialize the init_proposal_bboxes as normalized. [c_x, c_y, w, h], and we initialize it to the size of the entire image. """ super(EmbeddingRPNHead, self).init_weights() nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5) nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1) def _decode_init_proposals(self, imgs, img_metas): """Decode init_proposal_bboxes according to the size of images and expand dimension of init_proposal_features to batch_size. Args: imgs (list[Tensor]): List of FPN features. img_metas (list[dict]): List of meta-information of images. Need the img_shape to decode the init_proposals. Returns: Tuple(Tensor): - proposals (Tensor): Decoded proposal bboxes, has shape (batch_size, num_proposals, 4). - init_proposal_features (Tensor): Expanded proposal features, has shape (batch_size, num_proposals, proposal_feature_channel). - imgs_whwh (Tensor): Tensor with shape (batch_size, 4), the dimension means [img_width, img_height, img_width, img_height]. """ proposals = self.init_proposal_bboxes.weight.clone() proposals = bbox_cxcywh_to_xyxy(proposals) num_imgs = len(imgs[0]) imgs_whwh = [] for meta in img_metas: h, w, _ = meta['img_shape'] imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]])) imgs_whwh = torch.cat(imgs_whwh, dim=0) imgs_whwh = imgs_whwh[:, None, :] # imgs_whwh has shape (batch_size, 1, 4) # The shape of proposals change from (num_proposals, 4) # to (batch_size ,num_proposals, 4) proposals = proposals * imgs_whwh init_proposal_features = self.init_proposal_features.weight.clone() init_proposal_features = init_proposal_features[None].expand( num_imgs, *init_proposal_features.size()) return proposals, init_proposal_features, imgs_whwh def forward_dummy(self, img, img_metas): """Dummy forward function. Used in flops calculation. 
""" return self._decode_init_proposals(img, img_metas) def forward_train(self, img, img_metas): """Forward function in training stage.""" return self._decode_init_proposals(img, img_metas) def simple_test_rpn(self, img, img_metas): """Forward function in testing stage.""" return self._decode_init_proposals(img, img_metas) def simple_test(self, img, img_metas): """Forward function in testing stage.""" raise NotImplementedError def aug_test_rpn(self, feats, img_metas): raise NotImplementedError( 'EmbeddingRPNHead does not support test-time augmentation')
4,629
38.57265
78
py
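EmbeddingRPNHead above keeps its proposals as learnable embeddings and only rescales them to each image. Below is a small sketch of what the decoded outputs look like, assuming img_shape is the only field read from img_metas (as in the code above); the feature and image sizes are arbitrary.

import torch
from mmdet.models.builder import HEADS

rpn_head = HEADS.build(
    dict(
        type='EmbeddingRPNHead',
        num_proposals=100,
        proposal_feature_channel=256))
rpn_head.init_weights()  # every initial proposal spans the whole image

# One dummy FPN level with batch size 2.
feats = [torch.rand(2, 256, 32, 32)]
img_metas = [dict(img_shape=(800, 1333, 3)), dict(img_shape=(800, 1333, 3))]
proposals, proposal_feats, imgs_whwh = rpn_head.simple_test_rpn(feats, img_metas)
# proposals:      (2, 100, 4) xyxy boxes scaled to each image's size
# proposal_feats: (2, 100, 256) expanded copies of init_proposal_features
# imgs_whwh:      (2, 1, 4) holding [img_w, img_h, img_w, img_h]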
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/autoassign_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import bias_init_with_prob, normal_init from mmcv.runner import force_fp32 from mmdet.core import multi_apply from mmdet.core.anchor.point_generator import MlvlPointGenerator from mmdet.core.bbox import bbox_overlaps from mmdet.models import HEADS from mmdet.models.dense_heads.atss_head import reduce_mean from mmdet.models.dense_heads.fcos_head import FCOSHead from mmdet.models.dense_heads.paa_head import levels_to_images EPS = 1e-12 class CenterPrior(nn.Module): """Center Weighting module to adjust the category-specific prior distributions. Args: force_topk (bool): When no point falls into gt_bbox, forcibly select the k points closest to the center to calculate the center prior. Defaults to False. topk (int): The number of points used to calculate the center prior when no point falls in gt_bbox. Only work when force_topk if True. Defaults to 9. num_classes (int): The class number of dataset. Defaults to 80. strides (tuple[int]): The stride of each input feature map. Defaults to (8, 16, 32, 64, 128). """ def __init__(self, force_topk=False, topk=9, num_classes=80, strides=(8, 16, 32, 64, 128)): super(CenterPrior, self).__init__() self.mean = nn.Parameter(torch.zeros(num_classes, 2)) self.sigma = nn.Parameter(torch.ones(num_classes, 2)) self.strides = strides self.force_topk = force_topk self.topk = topk def forward(self, anchor_points_list, gt_bboxes, labels, inside_gt_bbox_mask): """Get the center prior of each point on the feature map for each instance. Args: anchor_points_list (list[Tensor]): list of coordinate of points on feature map. Each with shape (num_points, 2). gt_bboxes (Tensor): The gt_bboxes with shape of (num_gt, 4). labels (Tensor): The gt_labels with shape of (num_gt). inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape of (num_points, num_gt), each value is used to mark whether this point falls within a certain gt. Returns: tuple(Tensor): - center_prior_weights(Tensor): Float tensor with shape \ of (num_points, num_gt). Each value represents \ the center weighting coefficient. - inside_gt_bbox_mask (Tensor): Tensor of bool type, \ with shape of (num_points, num_gt), each \ value is used to mark whether this point falls \ within a certain gt or is the topk nearest points for \ a specific gt_bbox. 
""" inside_gt_bbox_mask = inside_gt_bbox_mask.clone() num_gts = len(labels) num_points = sum([len(item) for item in anchor_points_list]) if num_gts == 0: return gt_bboxes.new_zeros(num_points, num_gts), inside_gt_bbox_mask center_prior_list = [] for slvl_points, stride in zip(anchor_points_list, self.strides): # slvl_points: points from single level in FPN, has shape (h*w, 2) # single_level_points has shape (h*w, num_gt, 2) single_level_points = slvl_points[:, None, :].expand( (slvl_points.size(0), len(gt_bboxes), 2)) gt_center_x = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2) gt_center_y = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2) gt_center = torch.stack((gt_center_x, gt_center_y), dim=1) gt_center = gt_center[None] # instance_center has shape (1, num_gt, 2) instance_center = self.mean[labels][None] # instance_sigma has shape (1, num_gt, 2) instance_sigma = self.sigma[labels][None] # distance has shape (num_points, num_gt, 2) distance = (((single_level_points - gt_center) / float(stride) - instance_center)**2) center_prior = torch.exp(-distance / (2 * instance_sigma**2)).prod(dim=-1) center_prior_list.append(center_prior) center_prior_weights = torch.cat(center_prior_list, dim=0) if self.force_topk: gt_inds_no_points_inside = torch.nonzero( inside_gt_bbox_mask.sum(0) == 0).reshape(-1) if gt_inds_no_points_inside.numel(): topk_center_index = \ center_prior_weights[:, gt_inds_no_points_inside].topk( self.topk, dim=0)[1] temp_mask = inside_gt_bbox_mask[:, gt_inds_no_points_inside] inside_gt_bbox_mask[:, gt_inds_no_points_inside] = \ torch.scatter(temp_mask, dim=0, index=topk_center_index, src=torch.ones_like( topk_center_index, dtype=torch.bool)) center_prior_weights[~inside_gt_bbox_mask] = 0 return center_prior_weights, inside_gt_bbox_mask @HEADS.register_module() class AutoAssignHead(FCOSHead): """AutoAssignHead head used in AutoAssign. More details can be found in the `paper <https://arxiv.org/abs/2007.03496>`_ . Args: force_topk (bool): Used in center prior initialization to handle extremely small gt. Default is False. topk (int): The number of points used to calculate the center prior when no point falls in gt_bbox. Only work when force_topk if True. Defaults to 9. pos_loss_weight (float): The loss weight of positive loss and with default value 0.25. neg_loss_weight (float): The loss weight of negative loss and with default value 0.75. center_loss_weight (float): The loss weight of center prior loss and with default value 0.75. """ def __init__(self, *args, force_topk=False, topk=9, pos_loss_weight=0.25, neg_loss_weight=0.75, center_loss_weight=0.75, **kwargs): super().__init__(*args, conv_bias=True, **kwargs) self.center_prior = CenterPrior( force_topk=force_topk, topk=topk, num_classes=self.num_classes, strides=self.strides) self.pos_loss_weight = pos_loss_weight self.neg_loss_weight = neg_loss_weight self.center_loss_weight = center_loss_weight self.prior_generator = MlvlPointGenerator(self.strides, offset=0) def init_weights(self): """Initialize weights of the head. In particular, we have special initialization for classified conv's and regression conv's bias """ super(AutoAssignHead, self).init_weights() bias_cls = bias_init_with_prob(0.02) normal_init(self.conv_cls, std=0.01, bias=bias_cls) normal_init(self.conv_reg, std=0.01, bias=4.0) def forward_single(self, x, scale, stride): """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. 
stride (int): The corresponding stride for feature maps, only used to normalize the bbox prediction when self.norm_on_bbox is True. Returns: tuple: scores for each class, bbox predictions and centerness \ predictions of input feature maps. """ cls_score, bbox_pred, cls_feat, reg_feat = super( FCOSHead, self).forward_single(x) centerness = self.conv_centerness(reg_feat) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() bbox_pred = F.relu(bbox_pred) bbox_pred *= stride return cls_score, bbox_pred, centerness def get_pos_loss_single(self, cls_score, objectness, reg_loss, gt_labels, center_prior_weights): """Calculate the positive loss of all points in gt_bboxes. Args: cls_score (Tensor): All category scores for each point on the feature map. The shape is (num_points, num_class). objectness (Tensor): Foreground probability of all points, has shape (num_points, 1). reg_loss (Tensor): The regression loss of each gt_bbox and each prediction box, has shape of (num_points, num_gt). gt_labels (Tensor): The zeros based gt_labels of all gt with shape of (num_gt,). center_prior_weights (Tensor): Float tensor with shape of (num_points, num_gt). Each value represents the center weighting coefficient. Returns: tuple[Tensor]: - pos_loss (Tensor): The positive loss of all points in the gt_bboxes. """ # p_loc: localization confidence p_loc = torch.exp(-reg_loss) # p_cls: classification confidence p_cls = (cls_score * objectness)[:, gt_labels] # p_pos: joint confidence indicator p_pos = p_cls * p_loc # 3 is a hyper-parameter to control the contributions of high and # low confidence locations towards positive losses. confidence_weight = torch.exp(p_pos * 3) p_pos_weight = (confidence_weight * center_prior_weights) / ( (confidence_weight * center_prior_weights).sum( 0, keepdim=True)).clamp(min=EPS) reweighted_p_pos = (p_pos * p_pos_weight).sum(0) pos_loss = F.binary_cross_entropy( reweighted_p_pos, torch.ones_like(reweighted_p_pos), reduction='none') pos_loss = pos_loss.sum() * self.pos_loss_weight return pos_loss, def get_neg_loss_single(self, cls_score, objectness, gt_labels, ious, inside_gt_bbox_mask): """Calculate the negative loss of all points in feature map. Args: cls_score (Tensor): All category scores for each point on the feature map. The shape is (num_points, num_class). objectness (Tensor): Foreground probability of all points and is shape of (num_points, 1). gt_labels (Tensor): The zeros based label of all gt with shape of (num_gt). ious (Tensor): Float tensor with shape of (num_points, num_gt). Each value represent the iou of pred_bbox and gt_bboxes. inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape of (num_points, num_gt), each value is used to mark whether this point falls within a certain gt. Returns: tuple[Tensor]: - neg_loss (Tensor): The negative loss of all points in the feature map. """ num_gts = len(gt_labels) joint_conf = (cls_score * objectness) p_neg_weight = torch.ones_like(joint_conf) if num_gts > 0: # the order of dinmension would affect the value of # p_neg_weight, we strictly follow the original # implementation. 
inside_gt_bbox_mask = inside_gt_bbox_mask.permute(1, 0) ious = ious.permute(1, 0) foreground_idxs = torch.nonzero(inside_gt_bbox_mask, as_tuple=True) temp_weight = (1 / (1 - ious[foreground_idxs]).clamp_(EPS)) def normalize(x): return (x - x.min() + EPS) / (x.max() - x.min() + EPS) for instance_idx in range(num_gts): idxs = foreground_idxs[0] == instance_idx if idxs.any(): temp_weight[idxs] = normalize(temp_weight[idxs]) p_neg_weight[foreground_idxs[1], gt_labels[foreground_idxs[0]]] = 1 - temp_weight logits = (joint_conf * p_neg_weight) neg_loss = ( logits**2 * F.binary_cross_entropy( logits, torch.zeros_like(logits), reduction='none')) neg_loss = neg_loss.sum() * self.neg_loss_weight return neg_loss, @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) def loss(self, cls_scores, bbox_preds, objectnesses, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. objectnesses (list[Tensor]): objectness for each scale level, each is a 4D-tensor, the channel number is num_points * 1. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_scores) == len(bbox_preds) == len(objectnesses) all_num_gt = sum([len(item) for item in gt_bboxes]) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) inside_gt_bbox_mask_list, bbox_targets_list = self.get_targets( all_level_points, gt_bboxes) center_prior_weight_list = [] temp_inside_gt_bbox_mask_list = [] for gt_bboxe, gt_label, inside_gt_bbox_mask in zip( gt_bboxes, gt_labels, inside_gt_bbox_mask_list): center_prior_weight, inside_gt_bbox_mask = \ self.center_prior(all_level_points, gt_bboxe, gt_label, inside_gt_bbox_mask) center_prior_weight_list.append(center_prior_weight) temp_inside_gt_bbox_mask_list.append(inside_gt_bbox_mask) inside_gt_bbox_mask_list = temp_inside_gt_bbox_mask_list mlvl_points = torch.cat(all_level_points, dim=0) bbox_preds = levels_to_images(bbox_preds) cls_scores = levels_to_images(cls_scores) objectnesses = levels_to_images(objectnesses) reg_loss_list = [] ious_list = [] num_points = len(mlvl_points) for bbox_pred, encoded_targets, inside_gt_bbox_mask in zip( bbox_preds, bbox_targets_list, inside_gt_bbox_mask_list): temp_num_gt = encoded_targets.size(1) expand_mlvl_points = mlvl_points[:, None, :].expand( num_points, temp_num_gt, 2).reshape(-1, 2) encoded_targets = encoded_targets.reshape(-1, 4) expand_bbox_pred = bbox_pred[:, None, :].expand( num_points, temp_num_gt, 4).reshape(-1, 4) decoded_bbox_preds = self.bbox_coder.decode( expand_mlvl_points, expand_bbox_pred) decoded_target_preds = self.bbox_coder.decode( expand_mlvl_points, encoded_targets) with torch.no_grad(): ious = bbox_overlaps( decoded_bbox_preds, decoded_target_preds, is_aligned=True) ious = ious.reshape(num_points, 
temp_num_gt) if temp_num_gt: ious = ious.max( dim=-1, keepdim=True).values.repeat(1, temp_num_gt) else: ious = ious.new_zeros(num_points, temp_num_gt) ious[~inside_gt_bbox_mask] = 0 ious_list.append(ious) loss_bbox = self.loss_bbox( decoded_bbox_preds, decoded_target_preds, weight=None, reduction_override='none') reg_loss_list.append(loss_bbox.reshape(num_points, temp_num_gt)) cls_scores = [item.sigmoid() for item in cls_scores] objectnesses = [item.sigmoid() for item in objectnesses] pos_loss_list, = multi_apply(self.get_pos_loss_single, cls_scores, objectnesses, reg_loss_list, gt_labels, center_prior_weight_list) pos_avg_factor = reduce_mean( bbox_pred.new_tensor(all_num_gt)).clamp_(min=1) pos_loss = sum(pos_loss_list) / pos_avg_factor neg_loss_list, = multi_apply(self.get_neg_loss_single, cls_scores, objectnesses, gt_labels, ious_list, inside_gt_bbox_mask_list) neg_avg_factor = sum(item.data.sum() for item in center_prior_weight_list) neg_avg_factor = reduce_mean(neg_avg_factor).clamp_(min=1) neg_loss = sum(neg_loss_list) / neg_avg_factor center_loss = [] for i in range(len(img_metas)): if inside_gt_bbox_mask_list[i].any(): center_loss.append( len(gt_bboxes[i]) / center_prior_weight_list[i].sum().clamp_(min=EPS)) # when width or height of gt_bbox is smaller than stride of p3 else: center_loss.append(center_prior_weight_list[i].sum() * 0) center_loss = torch.stack(center_loss).mean() * self.center_loss_weight # avoid dead lock in DDP if all_num_gt == 0: pos_loss = bbox_preds[0].sum() * 0 dummy_center_prior_loss = self.center_prior.mean.sum( ) * 0 + self.center_prior.sigma.sum() * 0 center_loss = objectnesses[0].sum() * 0 + dummy_center_prior_loss loss = dict( loss_pos=pos_loss, loss_neg=neg_loss, loss_center=center_loss) return loss def get_targets(self, points, gt_bboxes_list): """Compute regression targets and each point inside or outside gt_bbox in multiple images. Args: points (list[Tensor]): Points of all fpn level, each has shape (num_points, 2). gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). Returns: tuple(list[Tensor]): - inside_gt_bbox_mask_list (list[Tensor]): Each Tensor is with bool type and shape of (num_points, num_gt), each value is used to mark whether this point falls within a certain gt. - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each level. Each tensor has shape (num_points, num_gt, 4). """ concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl inside_gt_bbox_mask_list, bbox_targets_list = multi_apply( self._get_target_single, gt_bboxes_list, points=concat_points) return inside_gt_bbox_mask_list, bbox_targets_list def _get_target_single(self, gt_bboxes, points): """Compute regression targets and each point inside or outside gt_bbox for a single image. Args: gt_bboxes (Tensor): gt_bbox of single image, has shape (num_gt, 4). points (Tensor): Points of all fpn level, has shape (num_points, 2). Returns: tuple[Tensor]: Containing the following Tensors: - inside_gt_bbox_mask (Tensor): Bool tensor with shape (num_points, num_gt), each value is used to mark whether this point falls within a certain gt. - bbox_targets (Tensor): BBox targets of each points with each gt_bboxes, has shape (num_points, num_gt, 4). 
""" num_points = points.size(0) num_gts = gt_bboxes.size(0) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None] ys = ys[:, None] left = xs - gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) if num_gts: inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 else: inside_gt_bbox_mask = bbox_targets.new_zeros((num_points, num_gts), dtype=torch.bool) return inside_gt_bbox_mask, bbox_targets def _get_points_single(self, featmap_size, stride, dtype, device, flatten=False): """Almost the same as the implementation in fcos, we remove half stride offset to align with the original implementation. This function will be deprecated soon. """ warnings.warn( '`_get_points_single` in `AutoAssignHead` will be ' 'deprecated soon, we support a multi level point generator now' 'you can get points of a single level feature map ' 'with `self.prior_generator.single_level_grid_priors` ') y, x = super(FCOSHead, self)._get_points_single(featmap_size, stride, dtype, device) points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), dim=-1) return points
22,895
42.611429
79
py
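The CenterPrior module above converts each category's learned (mean, sigma) into a Gaussian weight over the feature-map points and zeroes the weight outside the ground-truth box. The sketch below calls it directly; the points, box and label are made-up values chosen only to show the shapes involved.

import torch
from mmdet.models.dense_heads.autoassign_head import CenterPrior

center_prior = CenterPrior(num_classes=80, strides=(8, 16, 32, 64, 128))

# Two levels of (x, y) points and a single ground-truth instance.
points = [torch.rand(100, 2) * 640, torch.rand(25, 2) * 640]
gt_bboxes = torch.tensor([[100., 100., 300., 260.]])
gt_labels = torch.tensor([3])

# Boolean mask marking which of the 125 points fall inside the gt box.
all_points = torch.cat(points)
inside_gt_bbox_mask = (
    (all_points[:, 0] > gt_bboxes[0, 0]) & (all_points[:, 0] < gt_bboxes[0, 2])
    & (all_points[:, 1] > gt_bboxes[0, 1])
    & (all_points[:, 1] < gt_bboxes[0, 3]))[:, None]

weights, updated_mask = center_prior(points, gt_bboxes, gt_labels,
                                      inside_gt_bbox_mask)
# weights: (125, 1) center prior weights, zero for points outside the box
# updated_mask: (125, 1); identical to the input here since force_topk is False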
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/pisa_retinanet_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.runner import force_fp32 from mmdet.core import images_to_levels from ..builder import HEADS from ..losses import carl_loss, isr_p from .retina_head import RetinaHead @HEADS.register_module() class PISARetinaHead(RetinaHead): """PISA Retinanet Head. The head owns the same structure with Retinanet Head, but differs in two aspects: 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to change the positive loss weights. 2. Classification-aware regression loss is adopted as a third loss. """ @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes of each image with shape (num_obj, 4). gt_labels (list[Tensor]): Ground truth labels of each image with shape (num_obj, 4). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. Default: None. Returns: dict: Loss dict, comprise classification loss, regression loss and carl loss. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, return_sampling_results=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) num_imgs = len(img_metas) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels) for cls_score in cls_scores ] flatten_cls_scores = torch.cat( flatten_cls_scores, dim=1).reshape(-1, flatten_cls_scores[0].size(-1)) flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_bbox_preds = torch.cat( flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1)) flatten_labels = torch.cat(labels_list, dim=1).reshape(-1) flatten_label_weights = torch.cat( label_weights_list, dim=1).reshape(-1) flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4) flatten_bbox_targets = torch.cat( bbox_targets_list, dim=1).reshape(-1, 4) flatten_bbox_weights = torch.cat( bbox_weights_list, dim=1).reshape(-1, 4) # Apply ISR-P isr_cfg = self.train_cfg.get('isr', None) if isr_cfg is not None: all_targets = (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) with 
torch.no_grad(): all_targets = isr_p( flatten_cls_scores, flatten_bbox_preds, all_targets, flatten_anchors, sampling_results_list, bbox_coder=self.bbox_coder, loss_cls=self.loss_cls, num_class=self.num_classes, **self.train_cfg.isr) (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) = all_targets # For convenience we compute loss once instead separating by fpn level, # so that we don't need to separate the weights by level again. # The result should be the same losses_cls = self.loss_cls( flatten_cls_scores, flatten_labels, flatten_label_weights, avg_factor=num_total_samples) losses_bbox = self.loss_bbox( flatten_bbox_preds, flatten_bbox_targets, flatten_bbox_weights, avg_factor=num_total_samples) loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) # CARL Loss carl_cfg = self.train_cfg.get('carl', None) if carl_cfg is not None: loss_carl = carl_loss( flatten_cls_scores, flatten_labels, flatten_bbox_preds, flatten_bbox_targets, self.loss_bbox, **self.train_cfg.carl, avg_factor=num_total_pos, sigmoid=True, num_class=self.num_classes) loss_dict.update(loss_carl) return loss_dict
6,267
39.179487
79
py
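PISARetinaHead above changes only the loss computation: it reads two optional train_cfg entries, 'isr' for importance-based sample reweighting and 'carl' for the classification-aware regression loss. Below is a hedged train_cfg sketch; the k/bias numbers are illustrative assumptions rather than prescribed settings, and the assigner block is just a standard RetinaNet-style setup.

# Hypothetical train_cfg fragment for PISARetinaHead. In a real config this
# dict is parsed into an mmcv ConfigDict, which the head reads by attribute.
train_cfg = dict(
    assigner=dict(
        type='MaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.4,
        min_pos_iou=0,
        ignore_iof_thr=-1),
    isr=dict(k=2.0, bias=0.0),   # forwarded as **kwargs to isr_p()
    carl=dict(k=1.0, bias=0.2),  # forwarded as **kwargs to carl_loss()
    allowed_border=-1,
    pos_weight=-1,
    debug=False)
# Omitting 'isr' or 'carl' disables that component, since loss() fetches them
# with self.train_cfg.get(..., None).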
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/gfl_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, bbox_overlaps, build_assigner, build_sampler, images_to_levels, multi_apply, reduce_mean, unmap) from mmdet.core.utils import filter_scores_and_topk from ..builder import HEADS, build_loss from .anchor_head import AnchorHead class Integral(nn.Module): """A fixed layer for calculating integral result from distribution. This layer calculates the target location by :math: `sum{P(y_i) * y_i}`, P(y_i) denotes the softmax vector that represents the discrete distribution y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max} Args: reg_max (int): The maximal value of the discrete set. Default: 16. You may want to reset it according to your new dataset or related settings. """ def __init__(self, reg_max=16): super(Integral, self).__init__() self.reg_max = reg_max self.register_buffer('project', torch.linspace(0, self.reg_max, self.reg_max + 1)) def forward(self, x): """Forward feature from the regression head to get integral result of bounding box location. Args: x (Tensor): Features of the regression head, shape (N, 4*(n+1)), n is self.reg_max. Returns: x (Tensor): Integral result of box locations, i.e., distance offsets from the box center in four directions, shape (N, 4). """ x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) x = F.linear(x, self.project.type_as(x)).reshape(-1, 4) return x @HEADS.register_module() class GFLHead(AnchorHead): """Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection. GFL head structure is similar with ATSS, however GFL uses 1) joint representation for classification and localization quality, and 2) flexible General distribution for bounding box locations, which are supervised by Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively https://arxiv.org/abs/2006.04388 Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Default: 4. conv_cfg (dict): dictionary to construct and config conv layer. Default: None. norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='GN', num_groups=32, requires_grad=True). loss_qfl (dict): Config of Quality Focal Loss (QFL). bbox_coder (dict): Config of bbox coder. Defaults 'DistancePointBBoxCoder'. reg_max (int): Max value of integral set :math: `{0, ..., reg_max}` in QFL setting. Default: 16. init_cfg (dict or list[dict], optional): Initialization config dict. 
Example: >>> self = GFLHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_quality_score, bbox_pred = self.forward(feats) >>> assert len(cls_quality_score) == len(self.scales) """ def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), bbox_coder=dict(type='DistancePointBBoxCoder'), reg_max=16, init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='gfl_cls', std=0.01, bias_prob=0.01)), **kwargs): self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_max = reg_max super(GFLHead, self).__init__( num_classes, in_channels, bbox_coder=bbox_coder, init_cfg=init_cfg, **kwargs) self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # SSD sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.integral = Integral(self.reg_max) self.loss_dfl = build_loss(loss_dfl) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) assert self.num_anchors == 1, 'anchor free version' self.gfl_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.gfl_reg = nn.Conv2d( self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification and quality (IoU) joint scores for all scale levels, each is a 4D-tensor, the channel number is num_classes. bbox_preds (list[Tensor]): Box distribution logits for all scale levels, each is a 4D-tensor, the channel number is 4*(n+1), n is max value of integral set. """ return multi_apply(self.forward_single, feats, self.scales) def forward_single(self, x, scale): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: cls_score (Tensor): Cls and quality joint scores for a single scale level the channel number is num_classes. bbox_pred (Tensor): Box distribution logits for a single scale level, the channel number is 4*(n+1), n is max value of integral set. """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.gfl_cls(cls_feat) bbox_pred = scale(self.gfl_reg(reg_feat)).float() return cls_score, bbox_pred def anchor_center(self, anchors): """Get anchor centers from anchors. Args: anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. Returns: Tensor: Anchor centers with shape (N, 2), "xy" format. 
""" anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2 anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2 return torch.stack([anchors_cx, anchors_cy], dim=-1) def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, bbox_targets, stride, num_total_samples): """Compute loss of a single scale level. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). stride (tuple): Stride in this scale level. num_total_samples (int): Number of positive samples that is reduced over all GPUs. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) score = label_weights.new_zeros(labels.shape) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] weight_targets = cls_score.detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][pos_inds] pos_bbox_pred_corners = self.integral(pos_bbox_pred) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchor_centers, pos_bbox_pred_corners) pos_decode_bbox_targets = pos_bbox_targets / stride[0] score[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) target_corners = self.bbox_coder.encode(pos_anchor_centers, pos_decode_bbox_targets, self.reg_max).reshape(-1) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=weight_targets, avg_factor=1.0) # dfl loss loss_dfl = self.loss_dfl( pred_corners, target_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) else: loss_bbox = bbox_pred.sum() * 0 loss_dfl = bbox_pred.sum() * 0 weight_targets = bbox_pred.new_tensor(0) # cls (qfl) loss loss_cls = self.loss_cls( cls_score, (labels, score), weight=label_weights, avg_factor=num_total_samples) return loss_cls, loss_bbox, loss_dfl, weight_targets.sum() @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Cls and quality scores for each scale level has shape (N, num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. 
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos, dtype=torch.float, device=device)).item() num_total_samples = max(num_total_samples, 1.0) losses_cls, losses_bbox, losses_dfl,\ avg_factor = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, self.prior_generator.strides, num_total_samples=num_total_samples) avg_factor = sum(avg_factor) avg_factor = reduce_mean(avg_factor).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl) def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. GFL head does not need this value. mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. 
""" cfg = self.test_cfg if cfg is None else cfg img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, stride, priors) in enumerate( zip(cls_score_list, bbox_pred_list, self.prior_generator.strides, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] assert stride[0] == stride[1] bbox_pred = bbox_pred.permute(1, 2, 0) bbox_pred = self.integral(bbox_pred) * stride[0] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self.bbox_coder.decode( self.anchor_center(priors), bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) return self._bbox_post_process( mlvl_scores, mlvl_labels, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale=rescale, with_nms=with_nms) def get_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Get targets for GFL head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, anchor_list, valid_flag_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. 
multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, flat_anchors, valid_flags, num_level_anchors, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors, 4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). num_level_anchors Tensor): Number of anchors of each scale level. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. anchors (Tensor): All anchors in the image with shape (N, 4). labels (Tensor): Labels of all anchors in the image with shape (N,). label_weights (Tensor): Label weights of all anchor in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4). pos_inds (Tensor): Indices of positive anchor with shape (num_pos,). neg_inds (Tensor): Indices of negative anchor with shape (num_neg,). 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) assign_result = self.assigner.assign(anchors, num_level_anchors_inside, gt_bboxes, gt_bboxes_ignore, gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside
27913
42.010786
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/corner_head.py
# Copyright (c) OpenMMLab. All rights reserved. from logging import warning from math import ceil, log import torch import torch.nn as nn from mmcv.cnn import ConvModule, bias_init_with_prob from mmcv.ops import CornerPool, batched_nms from mmcv.runner import BaseModule from mmdet.core import multi_apply from ..builder import HEADS, build_loss from ..utils import gaussian_radius, gen_gaussian_target from ..utils.gaussian_target import (gather_feat, get_local_maximum, get_topk_from_heatmap, transpose_and_gather_feat) from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin class BiCornerPool(BaseModule): """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.) Args: in_channels (int): Input channels of module. out_channels (int): Output channels of module. feat_channels (int): Feature channels of module. directions (list[str]): Directions of two CornerPools. norm_cfg (dict): Dictionary to construct and config norm layer. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channels, directions, feat_channels=128, out_channels=128, norm_cfg=dict(type='BN', requires_grad=True), init_cfg=None): super(BiCornerPool, self).__init__(init_cfg) self.direction1_conv = ConvModule( in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) self.direction2_conv = ConvModule( in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) self.aftpool_conv = ConvModule( feat_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg, act_cfg=None) self.conv1 = ConvModule( in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) self.conv2 = ConvModule( in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg) self.direction1_pool = CornerPool(directions[0]) self.direction2_pool = CornerPool(directions[1]) self.relu = nn.ReLU(inplace=True) def forward(self, x): """Forward features from the upstream network. Args: x (tensor): Input feature of BiCornerPool. Returns: conv2 (tensor): Output feature of BiCornerPool. """ direction1_conv = self.direction1_conv(x) direction2_conv = self.direction2_conv(x) direction1_feat = self.direction1_pool(direction1_conv) direction2_feat = self.direction2_pool(direction2_conv) aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat) conv1 = self.conv1(x) relu = self.relu(aftpool_conv + conv1) conv2 = self.conv2(relu) return conv2 @HEADS.register_module() class CornerHead(BaseDenseHead, BBoxTestMixin): """Head of CornerNet: Detecting Objects as Paired Keypoints. Code is modified from the `official github repo <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/ kp.py#L73>`_ . More details can be found in the `paper <https://arxiv.org/abs/1808.01244>`_ . Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. num_feat_levels (int): Levels of feature from the previous module. 2 for HourglassNet-104 and 1 for HourglassNet-52. Because HourglassNet-104 outputs the final feature and intermediate supervision feature and HourglassNet-52 only outputs the final feature. Default: 2. corner_emb_channels (int): Channel of embedding vector. Default: 1. train_cfg (dict | None): Training config. Useless in CornerHead, but we keep this variable for SingleStageDetector. Default: None. test_cfg (dict | None): Testing config of CornerHead. Default: None. loss_heatmap (dict | None): Config of corner heatmap loss. Default: GaussianFocalLoss. 
loss_embedding (dict | None): Config of corner embedding loss. Default: AssociativeEmbeddingLoss. loss_offset (dict | None): Config of corner offset loss. Default: SmoothL1Loss. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_classes, in_channels, num_feat_levels=2, corner_emb_channels=1, train_cfg=None, test_cfg=None, loss_heatmap=dict( type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), loss_embedding=dict( type='AssociativeEmbeddingLoss', pull_weight=0.25, push_weight=0.25), loss_offset=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1), init_cfg=None): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super(CornerHead, self).__init__(init_cfg) self.num_classes = num_classes self.in_channels = in_channels self.corner_emb_channels = corner_emb_channels self.with_corner_emb = self.corner_emb_channels > 0 self.corner_offset_channels = 2 self.num_feat_levels = num_feat_levels self.loss_heatmap = build_loss( loss_heatmap) if loss_heatmap is not None else None self.loss_embedding = build_loss( loss_embedding) if loss_embedding is not None else None self.loss_offset = build_loss( loss_offset) if loss_offset is not None else None self.train_cfg = train_cfg self.test_cfg = test_cfg self._init_layers() def _make_layers(self, out_channels, in_channels=256, feat_channels=256): """Initialize conv sequential for CornerHead.""" return nn.Sequential( ConvModule(in_channels, feat_channels, 3, padding=1), ConvModule( feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None)) def _init_corner_kpt_layers(self): """Initialize corner keypoint layers. Including corner heatmap branch and corner offset branch. Each branch has two parts: prefix `tl_` for top-left and `br_` for bottom-right. """ self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList() self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList() self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_pool.append( BiCornerPool( self.in_channels, ['top', 'left'], out_channels=self.in_channels)) self.br_pool.append( BiCornerPool( self.in_channels, ['bottom', 'right'], out_channels=self.in_channels)) self.tl_heat.append( self._make_layers( out_channels=self.num_classes, in_channels=self.in_channels)) self.br_heat.append( self._make_layers( out_channels=self.num_classes, in_channels=self.in_channels)) self.tl_off.append( self._make_layers( out_channels=self.corner_offset_channels, in_channels=self.in_channels)) self.br_off.append( self._make_layers( out_channels=self.corner_offset_channels, in_channels=self.in_channels)) def _init_corner_emb_layers(self): """Initialize corner embedding layers. Only include corner embedding branch with two parts: prefix `tl_` for top-left and `br_` for bottom-right. """ self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_emb.append( self._make_layers( out_channels=self.corner_emb_channels, in_channels=self.in_channels)) self.br_emb.append( self._make_layers( out_channels=self.corner_emb_channels, in_channels=self.in_channels)) def _init_layers(self): """Initialize layers for CornerHead. 
Including two parts: corner keypoint layers and corner embedding layers """ self._init_corner_kpt_layers() if self.with_corner_emb: self._init_corner_emb_layers() def init_weights(self): super(CornerHead, self).init_weights() bias_init = bias_init_with_prob(0.1) for i in range(self.num_feat_levels): # The initialization of parameters are different between # nn.Conv2d and ConvModule. Our experiments show that # using the original initialization of nn.Conv2d increases # the final mAP by about 0.2% self.tl_heat[i][-1].conv.reset_parameters() self.tl_heat[i][-1].conv.bias.data.fill_(bias_init) self.br_heat[i][-1].conv.reset_parameters() self.br_heat[i][-1].conv.bias.data.fill_(bias_init) self.tl_off[i][-1].conv.reset_parameters() self.br_off[i][-1].conv.reset_parameters() if self.with_corner_emb: self.tl_emb[i][-1].conv.reset_parameters() self.br_emb[i][-1].conv.reset_parameters() def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of corner heatmaps, offset heatmaps and embedding heatmaps. - tl_heats (list[Tensor]): Top-left corner heatmaps for all levels, each is a 4D-tensor, the channels number is num_classes. - br_heats (list[Tensor]): Bottom-right corner heatmaps for all levels, each is a 4D-tensor, the channels number is num_classes. - tl_embs (list[Tensor] | list[None]): Top-left embedding heatmaps for all levels, each is a 4D-tensor or None. If not None, the channels number is corner_emb_channels. - br_embs (list[Tensor] | list[None]): Bottom-right embedding heatmaps for all levels, each is a 4D-tensor or None. If not None, the channels number is corner_emb_channels. - tl_offs (list[Tensor]): Top-left offset heatmaps for all levels, each is a 4D-tensor. The channels number is corner_offset_channels. - br_offs (list[Tensor]): Bottom-right offset heatmaps for all levels, each is a 4D-tensor. The channels number is corner_offset_channels. """ lvl_ind = list(range(self.num_feat_levels)) return multi_apply(self.forward_single, feats, lvl_ind) def forward_single(self, x, lvl_ind, return_pool=False): """Forward feature of a single level. Args: x (Tensor): Feature of a single level. lvl_ind (int): Level index of current feature. return_pool (bool): Return corner pool feature or not. Returns: tuple[Tensor]: A tuple of CornerHead's output for current feature level. Containing the following Tensors: - tl_heat (Tensor): Predicted top-left corner heatmap. - br_heat (Tensor): Predicted bottom-right corner heatmap. - tl_emb (Tensor | None): Predicted top-left embedding heatmap. None for `self.with_corner_emb == False`. - br_emb (Tensor | None): Predicted bottom-right embedding heatmap. None for `self.with_corner_emb == False`. - tl_off (Tensor): Predicted top-left offset heatmap. - br_off (Tensor): Predicted bottom-right offset heatmap. - tl_pool (Tensor): Top-left corner pool feature. Not must have. - br_pool (Tensor): Bottom-right corner pool feature. Not must have. 
""" tl_pool = self.tl_pool[lvl_ind](x) tl_heat = self.tl_heat[lvl_ind](tl_pool) br_pool = self.br_pool[lvl_ind](x) br_heat = self.br_heat[lvl_ind](br_pool) tl_emb, br_emb = None, None if self.with_corner_emb: tl_emb = self.tl_emb[lvl_ind](tl_pool) br_emb = self.br_emb[lvl_ind](br_pool) tl_off = self.tl_off[lvl_ind](tl_pool) br_off = self.br_off[lvl_ind](br_pool) result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off] if return_pool: result_list.append(tl_pool) result_list.append(br_pool) return result_list def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape, with_corner_emb=False, with_guiding_shift=False, with_centripetal_shift=False): """Generate corner targets. Including corner heatmap, corner offset. Optional: corner embedding, corner guiding shift, centripetal shift. For CornerNet, we generate corner heatmap, corner offset and corner embedding from this function. For CentripetalNet, we generate corner heatmap, corner offset, guiding shift and centripetal shift from this function. Args: gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels (list[Tensor]): Ground truth labels of each box, each has shape (num_gt,). feat_shape (list[int]): Shape of output feature, [batch, channel, height, width]. img_shape (list[int]): Shape of input image, [height, width, channel]. with_corner_emb (bool): Generate corner embedding target or not. Default: False. with_guiding_shift (bool): Generate guiding shift target or not. Default: False. with_centripetal_shift (bool): Generate centripetal shift target or not. Default: False. Returns: dict: Ground truth of corner heatmap, corner offset, corner embedding, guiding shift and centripetal shift. Containing the following keys: - topleft_heatmap (Tensor): Ground truth top-left corner heatmap. - bottomright_heatmap (Tensor): Ground truth bottom-right corner heatmap. - topleft_offset (Tensor): Ground truth top-left corner offset. - bottomright_offset (Tensor): Ground truth bottom-right corner offset. - corner_embedding (list[list[list[int]]]): Ground truth corner embedding. Not must have. - topleft_guiding_shift (Tensor): Ground truth top-left corner guiding shift. Not must have. - bottomright_guiding_shift (Tensor): Ground truth bottom-right corner guiding shift. Not must have. - topleft_centripetal_shift (Tensor): Ground truth top-left corner centripetal shift. Not must have. - bottomright_centripetal_shift (Tensor): Ground truth bottom-right corner centripetal shift. Not must have. """ batch_size, _, height, width = feat_shape img_h, img_w = img_shape[:2] width_ratio = float(width / img_w) height_ratio = float(height / img_h) gt_tl_heatmap = gt_bboxes[-1].new_zeros( [batch_size, self.num_classes, height, width]) gt_br_heatmap = gt_bboxes[-1].new_zeros( [batch_size, self.num_classes, height, width]) gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) if with_corner_emb: match = [] # Guiding shift is a kind of offset, from center to corner if with_guiding_shift: gt_tl_guiding_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) gt_br_guiding_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) # Centripetal shift is also a kind of offset, from center to corner # and normalized by log. 
if with_centripetal_shift: gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) gt_br_centripetal_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) for batch_id in range(batch_size): # Ground truth of corner embedding per image is a list of coord set corner_match = [] for box_id in range(len(gt_labels[batch_id])): left, top, right, bottom = gt_bboxes[batch_id][box_id] center_x = (left + right) / 2.0 center_y = (top + bottom) / 2.0 label = gt_labels[batch_id][box_id] # Use coords in the feature level to generate ground truth scale_left = left * width_ratio scale_right = right * width_ratio scale_top = top * height_ratio scale_bottom = bottom * height_ratio scale_center_x = center_x * width_ratio scale_center_y = center_y * height_ratio # Int coords on feature map/ground truth tensor left_idx = int(min(scale_left, width - 1)) right_idx = int(min(scale_right, width - 1)) top_idx = int(min(scale_top, height - 1)) bottom_idx = int(min(scale_bottom, height - 1)) # Generate gaussian heatmap scale_box_width = ceil(scale_right - scale_left) scale_box_height = ceil(scale_bottom - scale_top) radius = gaussian_radius((scale_box_height, scale_box_width), min_overlap=0.3) radius = max(0, int(radius)) gt_tl_heatmap[batch_id, label] = gen_gaussian_target( gt_tl_heatmap[batch_id, label], [left_idx, top_idx], radius) gt_br_heatmap[batch_id, label] = gen_gaussian_target( gt_br_heatmap[batch_id, label], [right_idx, bottom_idx], radius) # Generate corner offset left_offset = scale_left - left_idx top_offset = scale_top - top_idx right_offset = scale_right - right_idx bottom_offset = scale_bottom - bottom_idx gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset gt_br_offset[batch_id, 1, bottom_idx, right_idx] = bottom_offset # Generate corner embedding if with_corner_emb: corner_match.append([[top_idx, left_idx], [bottom_idx, right_idx]]) # Generate guiding shift if with_guiding_shift: gt_tl_guiding_shift[batch_id, 0, top_idx, left_idx] = scale_center_x - left_idx gt_tl_guiding_shift[batch_id, 1, top_idx, left_idx] = scale_center_y - top_idx gt_br_guiding_shift[batch_id, 0, bottom_idx, right_idx] = right_idx - scale_center_x gt_br_guiding_shift[ batch_id, 1, bottom_idx, right_idx] = bottom_idx - scale_center_y # Generate centripetal shift if with_centripetal_shift: gt_tl_centripetal_shift[batch_id, 0, top_idx, left_idx] = log(scale_center_x - scale_left) gt_tl_centripetal_shift[batch_id, 1, top_idx, left_idx] = log(scale_center_y - scale_top) gt_br_centripetal_shift[batch_id, 0, bottom_idx, right_idx] = log(scale_right - scale_center_x) gt_br_centripetal_shift[batch_id, 1, bottom_idx, right_idx] = log(scale_bottom - scale_center_y) if with_corner_emb: match.append(corner_match) target_result = dict( topleft_heatmap=gt_tl_heatmap, topleft_offset=gt_tl_offset, bottomright_heatmap=gt_br_heatmap, bottomright_offset=gt_br_offset) if with_corner_emb: target_result.update(corner_embedding=match) if with_guiding_shift: target_result.update( topleft_guiding_shift=gt_tl_guiding_shift, bottomright_guiding_shift=gt_br_guiding_shift) if with_centripetal_shift: target_result.update( topleft_centripetal_shift=gt_tl_centripetal_shift, bottomright_centripetal_shift=gt_br_centripetal_shift) return target_result def loss(self, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of 
the head. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [left, top, right, bottom] format. gt_labels (list[Tensor]): Class indices corresponding to each box. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. Containing the following losses: - det_loss (list[Tensor]): Corner keypoint losses of all feature levels. - pull_loss (list[Tensor]): Part one of AssociativeEmbedding losses of all feature levels. - push_loss (list[Tensor]): Part two of AssociativeEmbedding losses of all feature levels. - off_loss (list[Tensor]): Corner offset losses of all feature levels. """ targets = self.get_targets( gt_bboxes, gt_labels, tl_heats[-1].shape, img_metas[0]['pad_shape'], with_corner_emb=self.with_corner_emb) mlvl_targets = [targets for _ in range(self.num_feat_levels)] det_losses, pull_losses, push_losses, off_losses = multi_apply( self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, mlvl_targets) loss_dict = dict(det_loss=det_losses, off_loss=off_losses) if self.with_corner_emb: loss_dict.update(pull_loss=pull_losses, push_loss=push_losses) return loss_dict def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off, targets): """Compute losses for single level. Args: tl_hmp (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_hmp (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_emb (Tensor): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). targets (dict): Corner target generated by `get_targets`. Returns: tuple[torch.Tensor]: Losses of the head's different branches containing the following losses: - det_loss (Tensor): Corner keypoint loss. - pull_loss (Tensor): Part one of AssociativeEmbedding loss. - push_loss (Tensor): Part two of AssociativeEmbedding loss. - off_loss (Tensor): Corner offset loss. 
""" gt_tl_hmp = targets['topleft_heatmap'] gt_br_hmp = targets['bottomright_heatmap'] gt_tl_off = targets['topleft_offset'] gt_br_off = targets['bottomright_offset'] gt_embedding = targets['corner_embedding'] # Detection loss tl_det_loss = self.loss_heatmap( tl_hmp.sigmoid(), gt_tl_hmp, avg_factor=max(1, gt_tl_hmp.eq(1).sum())) br_det_loss = self.loss_heatmap( br_hmp.sigmoid(), gt_br_hmp, avg_factor=max(1, gt_br_hmp.eq(1).sum())) det_loss = (tl_det_loss + br_det_loss) / 2.0 # AssociativeEmbedding loss if self.with_corner_emb and self.loss_embedding is not None: pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb, gt_embedding) else: pull_loss, push_loss = None, None # Offset loss # We only compute the offset loss at the real corner position. # The value of real corner would be 1 in heatmap ground truth. # The mask is computed in class agnostic mode and its shape is # batch * 1 * width * height. tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_tl_hmp) br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_br_hmp) tl_off_loss = self.loss_offset( tl_off, gt_tl_off, tl_off_mask, avg_factor=max(1, tl_off_mask.sum())) br_off_loss = self.loss_offset( br_off, gt_br_off, br_off_mask, avg_factor=max(1, br_off_mask.sum())) off_loss = (tl_off_loss + br_off_loss) / 2.0 return det_loss, pull_loss, push_loss, off_loss def get_bboxes(self, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, img_metas, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. """ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas) result_list = [] for img_id in range(len(img_metas)): result_list.append( self._get_bboxes_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], img_metas[img_id], tl_emb=tl_embs[-1][img_id:img_id + 1, :], br_emb=br_embs[-1][img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) return result_list def _get_bboxes_single(self, tl_heat, br_heat, tl_off, br_off, img_meta, tl_emb=None, br_emb=None, tl_centripetal_shift=None, br_centripetal_shift=None, rescale=False, with_nms=True): """Transform outputs for a single batch item into bbox predictions. Args: tl_heat (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_heat (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). 
br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. tl_emb (Tensor): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_centripetal_shift: Top-left corner's centripetal shift for current level with shape (N, 2, H, W). br_centripetal_shift: Bottom-right corner's centripetal shift for current level with shape (N, 2, H, W). rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. """ if isinstance(img_meta, (list, tuple)): img_meta = img_meta[0] batch_bboxes, batch_scores, batch_clses = self.decode_heatmap( tl_heat=tl_heat.sigmoid(), br_heat=br_heat.sigmoid(), tl_off=tl_off, br_off=br_off, tl_emb=tl_emb, br_emb=br_emb, tl_centripetal_shift=tl_centripetal_shift, br_centripetal_shift=br_centripetal_shift, img_meta=img_meta, k=self.test_cfg.corner_topk, kernel=self.test_cfg.local_maximum_kernel, distance_threshold=self.test_cfg.distance_threshold) if rescale: batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor']) bboxes = batch_bboxes.view([-1, 4]) scores = batch_scores.view(-1) clses = batch_clses.view(-1) detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1) keepinds = (detections[:, -1] > -0.1) detections = detections[keepinds] labels = clses[keepinds] if with_nms: detections, labels = self._bboxes_nms(detections, labels, self.test_cfg) return detections, labels def _bboxes_nms(self, bboxes, labels, cfg): if 'nms_cfg' in cfg: warning.warn('nms_cfg in test_cfg will be deprecated. ' 'Please rename it as nms') if 'nms' not in cfg: cfg.nms = cfg.nms_cfg if labels.numel() > 0: max_num = cfg.max_per_img bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1].contiguous(), labels, cfg.nms) if max_num > 0: bboxes = bboxes[:max_num] labels = labels[keep][:max_num] return bboxes, labels def decode_heatmap(self, tl_heat, br_heat, tl_off, br_off, tl_emb=None, br_emb=None, tl_centripetal_shift=None, br_centripetal_shift=None, img_meta=None, k=100, kernel=3, distance_threshold=0.5, num_dets=1000): """Transform outputs for a single batch item into raw bbox predictions. Args: tl_heat (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_heat (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). tl_emb (Tensor | None): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor | None): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_centripetal_shift (Tensor | None): Top-left centripetal shift for current level with shape (N, 2, H, W). br_centripetal_shift (Tensor | None): Bottom-right centripetal shift for current level with shape (N, 2, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. k (int): Get top k corner keypoints from heatmap. kernel (int): Max pooling kernel for extract local maximum pixels. distance_threshold (float): Distance threshold. 
Top-left and bottom-right corner keypoints with feature distance less than the threshold will be regarded as keypoints from same object. num_dets (int): Num of raw boxes before doing nms. Returns: tuple[torch.Tensor]: Decoded output of CornerHead, containing the following Tensors: - bboxes (Tensor): Coords of each box. - scores (Tensor): Scores of each box. - clses (Tensor): Categories of each box. """ with_embedding = tl_emb is not None and br_emb is not None with_centripetal_shift = ( tl_centripetal_shift is not None and br_centripetal_shift is not None) assert with_embedding + with_centripetal_shift == 1 batch, _, height, width = tl_heat.size() if torch.onnx.is_in_onnx_export(): inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2] else: inp_h, inp_w, _ = img_meta['pad_shape'] # perform nms on heatmaps tl_heat = get_local_maximum(tl_heat, kernel=kernel) br_heat = get_local_maximum(br_heat, kernel=kernel) tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap( tl_heat, k=k) br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap( br_heat, k=k) # We use repeat instead of expand here because expand is a # shallow-copy function. Thus it could cause unexpected testing result # sometimes. Using expand will decrease about 10% mAP during testing # compared to repeat. tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k) tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k) br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1) br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1) tl_off = transpose_and_gather_feat(tl_off, tl_inds) tl_off = tl_off.view(batch, k, 1, 2) br_off = transpose_and_gather_feat(br_off, br_inds) br_off = br_off.view(batch, 1, k, 2) tl_xs = tl_xs + tl_off[..., 0] tl_ys = tl_ys + tl_off[..., 1] br_xs = br_xs + br_off[..., 0] br_ys = br_ys + br_off[..., 1] if with_centripetal_shift: tl_centripetal_shift = transpose_and_gather_feat( tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp() br_centripetal_shift = transpose_and_gather_feat( br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp() tl_ctxs = tl_xs + tl_centripetal_shift[..., 0] tl_ctys = tl_ys + tl_centripetal_shift[..., 1] br_ctxs = br_xs - br_centripetal_shift[..., 0] br_ctys = br_ys - br_centripetal_shift[..., 1] # all possible boxes based on top k corners (ignoring class) tl_xs *= (inp_w / width) tl_ys *= (inp_h / height) br_xs *= (inp_w / width) br_ys *= (inp_h / height) if with_centripetal_shift: tl_ctxs *= (inp_w / width) tl_ctys *= (inp_h / height) br_ctxs *= (inp_w / width) br_ctys *= (inp_h / height) x_off, y_off = 0, 0 # no crop if not torch.onnx.is_in_onnx_export(): # since `RandomCenterCropPad` is done on CPU with numpy and it's # not dynamic traceable when exporting to ONNX, thus 'border' # does not appears as key in 'img_meta'. As a tmp solution, # we move this 'border' handle part to the postprocess after # finished exporting to ONNX, which is handle in # `mmdet/core/export/model_wrappers.py`. Though difference between # pytorch and exported onnx model, it might be ignored since # comparable performance is achieved between them (e.g. 
40.4 vs # 40.6 on COCO val2017, for CornerNet without test-time flip) if 'border' in img_meta: x_off = img_meta['border'][2] y_off = img_meta['border'][0] tl_xs -= x_off tl_ys -= y_off br_xs -= x_off br_ys -= y_off zeros = tl_xs.new_zeros(*tl_xs.size()) tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros) tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros) br_xs = torch.where(br_xs > 0.0, br_xs, zeros) br_ys = torch.where(br_ys > 0.0, br_ys, zeros) bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3) area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs() if with_centripetal_shift: tl_ctxs -= x_off tl_ctys -= y_off br_ctxs -= x_off br_ctys -= y_off tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs) tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys) br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs) br_ctys *= br_ctys.gt(0.0).type_as(br_ctys) ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys), dim=3) area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs() rcentral = torch.zeros_like(ct_bboxes) # magic nums from paper section 4.1 mu = torch.ones_like(area_bboxes) / 2.4 mu[area_bboxes > 3500] = 1 / 2.1 # large bbox have smaller mu bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2 bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2 rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] - bboxes[..., 0]) / 2 rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] - bboxes[..., 1]) / 2 rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] - bboxes[..., 0]) / 2 rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] - bboxes[..., 1]) / 2 area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) * (rcentral[..., 3] - rcentral[..., 1])).abs() dists = area_ct_bboxes / area_rcentral tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | ( ct_bboxes[..., 0] >= rcentral[..., 2]) tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | ( ct_bboxes[..., 1] >= rcentral[..., 3]) br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | ( ct_bboxes[..., 2] >= rcentral[..., 2]) br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | ( ct_bboxes[..., 3] >= rcentral[..., 3]) if with_embedding: tl_emb = transpose_and_gather_feat(tl_emb, tl_inds) tl_emb = tl_emb.view(batch, k, 1) br_emb = transpose_and_gather_feat(br_emb, br_inds) br_emb = br_emb.view(batch, 1, k) dists = torch.abs(tl_emb - br_emb) tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k) br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1) scores = (tl_scores + br_scores) / 2 # scores for all possible boxes # tl and br should have same class tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k) br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1) cls_inds = (tl_clses != br_clses) # reject boxes based on distances dist_inds = dists > distance_threshold # reject boxes based on widths and heights width_inds = (br_xs <= tl_xs) height_inds = (br_ys <= tl_ys) # No use `scores[cls_inds]`, instead we use `torch.where` here. # Since only 1-D indices with type 'tensor(bool)' are supported # when exporting to ONNX, any other bool indices with more dimensions # (e.g. 
2-D bool tensor) as input parameter in node is invalid negative_scores = -1 * torch.ones_like(scores) scores = torch.where(cls_inds, negative_scores, scores) scores = torch.where(width_inds, negative_scores, scores) scores = torch.where(height_inds, negative_scores, scores) scores = torch.where(dist_inds, negative_scores, scores) if with_centripetal_shift: scores[tl_ctx_inds] = -1 scores[tl_cty_inds] = -1 scores[br_ctx_inds] = -1 scores[br_cty_inds] = -1 scores = scores.view(batch, -1) scores, inds = torch.topk(scores, num_dets) scores = scores.unsqueeze(2) bboxes = bboxes.view(batch, -1, 4) bboxes = gather_feat(bboxes, inds) clses = tl_clses.contiguous().view(batch, -1, 1) clses = gather_feat(clses, inds).float() return bboxes, scores, clses def onnx_export(self, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, img_metas, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor, Tensor]: First tensor bboxes with shape [N, num_det, 5], 5 arrange as (x1, y1, x2, y2, score) and second element is class labels of shape [N, num_det]. """ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len( img_metas) == 1 result_list = [] for img_id in range(len(img_metas)): result_list.append( self._get_bboxes_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], img_metas[img_id], tl_emb=tl_embs[-1][img_id:img_id + 1, :], br_emb=br_embs[-1][img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) detections, labels = result_list[0] # batch_size 1 here, [1, num_det, 5], [1, num_det] return detections.unsqueeze(0), labels.unsqueeze(0)
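# --- Hedged illustration (not part of the original file) ---
# A small, self-contained sketch of the corner-pairing idea used in
# decode_heatmap above: the top-k top-left and bottom-right corners are
# broadcast into a k x k grid so that every pair forms a candidate box, and
# pairs with invalid geometry (bottom-right not to the lower-right of the
# top-left) are rejected, mirroring the width_inds / height_inds checks.
# The coordinates below are illustrative assumptions, not repository values.
if __name__ == '__main__':
    batch, k = 1, 3
    tl_xs = torch.tensor([[10., 40., 70.]]).view(batch, k, 1).repeat(1, 1, k)
    tl_ys = torch.tensor([[12., 42., 72.]]).view(batch, k, 1).repeat(1, 1, k)
    br_xs = torch.tensor([[30., 60., 90.]]).view(batch, 1, k).repeat(1, k, 1)
    br_ys = torch.tensor([[32., 62., 92.]]).view(batch, 1, k).repeat(1, k, 1)
    bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)  # (1, k, k, 4)
    valid = (br_xs > tl_xs) & (br_ys > tl_ys)  # geometric rejection mask
    print(bboxes.shape, int(valid.sum()))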
48420
43.668819
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/yolact_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, ModuleList, force_fp32 from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply from mmdet.core.utils import select_single_mlvl from ..builder import HEADS, build_loss from .anchor_head import AnchorHead @HEADS.register_module() class YOLACTHead(AnchorHead): """YOLACT box head used in https://arxiv.org/abs/1904.02689. Note that YOLACT head is a light version of RetinaNet head. Four differences are described as follows: 1. YOLACT box head has three-times fewer anchors. 2. YOLACT box head shares the convs for box and cls branches. 3. YOLACT box head uses OHEM instead of Focal loss. 4. YOLACT box head predicts a set of mask coefficients for each box. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. anchor_generator (dict): Config dict for anchor generator loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. num_head_convs (int): Number of the conv layers shared by box and cls branches. num_protos (int): Number of the mask coefficients. use_ohem (bool): If true, ``loss_single_OHEM`` will be used for cls loss calculation. If false, ``loss_single`` will be used. conv_cfg (dict): Dictionary to construct and config conv layer. norm_cfg (dict): Dictionary to construct and config norm layer. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, num_classes, in_channels, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=3, scales_per_octave=1, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, reduction='none', loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.5), num_head_convs=1, num_protos=32, use_ohem=True, conv_cfg=None, norm_cfg=None, init_cfg=dict( type='Xavier', distribution='uniform', bias=0, layer='Conv2d'), **kwargs): self.num_head_convs = num_head_convs self.num_protos = num_protos self.use_ohem = use_ohem self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(YOLACTHead, self).__init__( num_classes, in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, anchor_generator=anchor_generator, init_cfg=init_cfg, **kwargs) if self.use_ohem: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.sampling = False def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.head_convs = ModuleList() for i in range(self.num_head_convs): chn = self.in_channels if i == 0 else self.feat_channels self.head_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.conv_coeff = nn.Conv2d( self.feat_channels, self.num_base_priors * self.num_protos, 3, padding=1) def forward_single(self, x): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level \ the channels number is num_anchors * num_classes. 
bbox_pred (Tensor): Box energies / deltas for a single scale \ level, the channels number is num_anchors * 4. coeff_pred (Tensor): Mask coefficients for a single scale \ level, the channels number is num_anchors * num_protos. """ for head_conv in self.head_convs: x = head_conv(x) cls_score = self.conv_cls(x) bbox_pred = self.conv_reg(x) coeff_pred = self.conv_coeff(x).tanh() return cls_score, bbox_pred, coeff_pred @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """A combination of the func:``AnchorHead.loss`` and func:``SSDHead.loss``. When ``self.use_ohem == True``, it functions like ``SSDHead.loss``, otherwise, it follows ``AnchorHead.loss``. Besides, it additionally returns ``sampling_results``. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): Class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): Specify which bounding boxes can be ignored when computing the loss. Default: None Returns: tuple: dict[str, Tensor]: A dictionary of loss components. List[:obj:``SamplingResult``]: Sampler results for each image. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, unmap_outputs=not self.use_ohem, return_sampling_results=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results) = cls_reg_targets if self.use_ohem: num_images = len(img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) # check NaN and Inf assert torch.isfinite(all_cls_scores).all().item(), \ 'classification scores become infinite or NaN!' assert torch.isfinite(all_bbox_preds).all().item(), \ 'bbox predications become infinite or NaN!' 
losses_cls, losses_bbox = multi_apply( self.loss_single_OHEM, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos) else: num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels, label_weights, bbox_targets, bbox_weights, num_total_samples): """"See func:``SSDHead.loss``.""" loss_cls_all = self.loss_cls(cls_score, labels, label_weights) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( as_tuple=False).reshape(-1) neg_inds = (labels == self.num_classes).nonzero( as_tuple=False).view(-1) num_pos_samples = pos_inds.size(0) if num_pos_samples == 0: num_neg_samples = neg_inds.size(0) else: num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples if num_neg_samples > neg_inds.size(0): num_neg_samples = neg_inds.size(0) topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) loss_cls_pos = loss_cls_all[pos_inds].sum() loss_cls_neg = topk_loss_cls_neg.sum() loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) loss_bbox = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples) return loss_cls[None], loss_bbox @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds')) def get_bboxes(self, cls_scores, bbox_preds, coeff_preds, img_metas, cfg=None, rescale=False): """"Similar to func:``AnchorHead.get_bboxes``, but additionally processes coeff_preds. Args: cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) coeff_preds (list[Tensor]): Mask coefficients for each scale level with shape (N, num_anchors * num_protos, H, W) img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. cfg (mmcv.Config | None): Test / postprocessing configuration, if None, test_cfg would be used rescale (bool): If True, return boxes in original image space. Default: False. Returns: list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is a 3-tuple. The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is an (n,) tensor where each item is the predicted class label of the corresponding box. The third item is an (n, num_protos) tensor where each item is the predicted mask coefficients of instance inside the corresponding box. 
""" assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) device = cls_scores[0].device featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) det_bboxes = [] det_labels = [] det_coeffs = [] for img_id in range(len(img_metas)): cls_score_list = select_single_mlvl(cls_scores, img_id) bbox_pred_list = select_single_mlvl(bbox_preds, img_id) coeff_pred_list = select_single_mlvl(coeff_preds, img_id) img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list, coeff_pred_list, mlvl_anchors, img_shape, scale_factor, cfg, rescale) det_bboxes.append(bbox_res[0]) det_labels.append(bbox_res[1]) det_coeffs.append(bbox_res[2]) return det_bboxes, det_labels, det_coeffs def _get_bboxes_single(self, cls_score_list, bbox_pred_list, coeff_preds_list, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): """"Similar to func:``AnchorHead._get_bboxes_single``, but additionally processes coeff_preds_list and uses fast NMS instead of traditional NMS. Args: cls_score_list (list[Tensor]): Box scores for a single scale level Has shape (num_anchors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas for a single scale level with shape (num_anchors * 4, H, W). coeff_preds_list (list[Tensor]): Mask coefficients for a single scale level with shape (num_anchors * num_protos, H, W). mlvl_anchors (list[Tensor]): Box reference for a single scale level with shape (num_total_anchors, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Returns: tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is an (n,) tensor where each item is the predicted class label of the corresponding box. The third item is an (n, num_protos) tensor where each item is the predicted mask coefficients of instance inside the corresponding box. """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors) nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_coeffs = [] for cls_score, bbox_pred, coeff_pred, anchors in \ zip(cls_score_list, bbox_pred_list, coeff_preds_list, mlvl_anchors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) coeff_pred = coeff_pred.permute(1, 2, 0).reshape(-1, self.num_protos) if 0 < nms_pre < scores.shape[0]: # Get maximum scores for foreground classes. 
if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = scores[:, :-1].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] coeff_pred = coeff_pred[topk_inds, :] bboxes = self.bbox_coder.decode( anchors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_coeffs.append(coeff_pred) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) mlvl_coeffs = torch.cat(mlvl_coeffs) if self.use_sigmoid_cls: # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores, mlvl_coeffs, cfg.score_thr, cfg.iou_thr, cfg.top_k, cfg.max_per_img) return det_bboxes, det_labels, det_coeffs @HEADS.register_module() class YOLACTSegmHead(BaseModule): """YOLACT segmentation head used in https://arxiv.org/abs/1904.02689. Apply a semantic segmentation loss on feature space using layers that are only evaluated during training to increase performance with no speed penalty. Args: in_channels (int): Number of channels in the input feature map. num_classes (int): Number of categories excluding the background category. loss_segm (dict): Config of semantic segmentation loss. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, num_classes, in_channels=256, loss_segm=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), init_cfg=dict( type='Xavier', distribution='uniform', override=dict(name='segm_conv'))): super(YOLACTSegmHead, self).__init__(init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.loss_segm = build_loss(loss_segm) self._init_layers() self.fp16_enabled = False def _init_layers(self): """Initialize layers of the head.""" self.segm_conv = nn.Conv2d( self.in_channels, self.num_classes, kernel_size=1) def forward(self, x): """Forward feature from the upstream network. Args: x (Tensor): Feature from the upstream network, which is a 4D-tensor. Returns: Tensor: Predicted semantic segmentation map with shape (N, num_classes, H, W). """ return self.segm_conv(x) @force_fp32(apply_to=('segm_pred', )) def loss(self, segm_pred, gt_masks, gt_labels): """Compute loss of the head. Args: segm_pred (list[Tensor]): Predicted semantic segmentation map with shape (N, num_classes, H, W). gt_masks (list[Tensor]): Ground truth masks for each image with the same shape of the input image. gt_labels (list[Tensor]): Class indices corresponding to each box. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" loss_segm = [] num_imgs, num_classes, mask_h, mask_w = segm_pred.size() for idx in range(num_imgs): cur_segm_pred = segm_pred[idx] cur_gt_masks = gt_masks[idx].float() cur_gt_labels = gt_labels[idx] segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks, cur_gt_labels) if segm_targets is None: loss = self.loss_segm(cur_segm_pred, torch.zeros_like(cur_segm_pred), torch.zeros_like(cur_segm_pred)) else: loss = self.loss_segm( cur_segm_pred, segm_targets, avg_factor=num_imgs * mask_h * mask_w) loss_segm.append(loss) return dict(loss_segm=loss_segm) def get_targets(self, segm_pred, gt_masks, gt_labels): """Compute semantic segmentation targets for each image. Args: segm_pred (Tensor): Predicted semantic segmentation map with shape (num_classes, H, W). gt_masks (Tensor): Ground truth masks for each image with the same shape of the input image. gt_labels (Tensor): Class indices corresponding to each box. Returns: Tensor: Semantic segmentation targets with shape (num_classes, H, W). """ if gt_masks.size(0) == 0: return None num_classes, mask_h, mask_w = segm_pred.size() with torch.no_grad(): downsampled_masks = F.interpolate( gt_masks.unsqueeze(0), (mask_h, mask_w), mode='bilinear', align_corners=False).squeeze(0) downsampled_masks = downsampled_masks.gt(0.5).float() segm_targets = torch.zeros_like(segm_pred, requires_grad=False) for obj_idx in range(downsampled_masks.size(0)): segm_targets[gt_labels[obj_idx] - 1] = torch.max( segm_targets[gt_labels[obj_idx] - 1], downsampled_masks[obj_idx]) return segm_targets def simple_test(self, feats, img_metas, rescale=False): """Test function without test-time augmentation.""" raise NotImplementedError( 'simple_test of YOLACTSegmHead is not implemented ' 'because this head is only evaluated during training') @HEADS.register_module() class YOLACTProtonet(BaseModule): """YOLACT mask head used in https://arxiv.org/abs/1904.02689. This head outputs the mask prototypes for YOLACT. Args: in_channels (int): Number of channels in the input feature map. proto_channels (tuple[int]): Output channels of protonet convs. proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs. include_last_relu (Bool): If keep the last relu of protonet. num_protos (int): Number of prototypes. num_classes (int): Number of categories excluding the background category. loss_mask_weight (float): Reweight the mask loss by this factor. max_masks_to_train (int): Maximum number of masks to train for each image. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, num_classes, in_channels=256, proto_channels=(256, 256, 256, None, 256, 32), proto_kernel_sizes=(3, 3, 3, -2, 3, 1), include_last_relu=True, num_protos=32, loss_mask_weight=1.0, max_masks_to_train=100, init_cfg=dict( type='Xavier', distribution='uniform', override=dict(name='protonet'))): super(YOLACTProtonet, self).__init__(init_cfg) self.in_channels = in_channels self.proto_channels = proto_channels self.proto_kernel_sizes = proto_kernel_sizes self.include_last_relu = include_last_relu self.protonet = self._init_layers() self.loss_mask_weight = loss_mask_weight self.num_protos = num_protos self.num_classes = num_classes self.max_masks_to_train = max_masks_to_train self.fp16_enabled = False def _init_layers(self): """A helper function to take a config setting and turn it into a network.""" # Possible patterns: # ( 256, 3) -> conv # ( 256,-2) -> deconv # (None,-2) -> bilinear interpolate in_channels = self.in_channels protonets = ModuleList() for num_channels, kernel_size in zip(self.proto_channels, self.proto_kernel_sizes): if kernel_size > 0: layer = nn.Conv2d( in_channels, num_channels, kernel_size, padding=kernel_size // 2) else: if num_channels is None: layer = InterpolateModule( scale_factor=-kernel_size, mode='bilinear', align_corners=False) else: layer = nn.ConvTranspose2d( in_channels, num_channels, -kernel_size, padding=kernel_size // 2) protonets.append(layer) protonets.append(nn.ReLU(inplace=True)) in_channels = num_channels if num_channels is not None \ else in_channels if not self.include_last_relu: protonets = protonets[:-1] return nn.Sequential(*protonets) def forward_dummy(self, x): prototypes = self.protonet(x) return prototypes def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None): """Forward feature from the upstream network to get prototypes and linearly combine the prototypes, using masks coefficients, into instance masks. Finally, crop the instance masks with given bboxes. Args: x (Tensor): Feature from the upstream network, which is a 4D-tensor. coeff_pred (list[Tensor]): Mask coefficients for each scale level with shape (N, num_anchors * num_protos, H, W). bboxes (list[Tensor]): Box used for cropping with shape (N, num_anchors * 4, H, W). During training, they are ground truth boxes. During testing, they are predicted boxes. img_meta (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. sampling_results (List[:obj:``SamplingResult``]): Sampler results for each image. Returns: list[Tensor]: Predicted instance segmentation masks. """ prototypes = self.protonet(x) prototypes = prototypes.permute(0, 2, 3, 1).contiguous() num_imgs = x.size(0) # The reason for not using self.training is that # val workflow will have a dimension mismatch error. # Note that this writing method is very tricky. 
# Fix https://github.com/open-mmlab/mmdetection/issues/5978 is_train_or_val_workflow = (coeff_pred[0].dim() == 4) # Train or val workflow if is_train_or_val_workflow: coeff_pred_list = [] for coeff_pred_per_level in coeff_pred: coeff_pred_per_level = \ coeff_pred_per_level.permute( 0, 2, 3, 1).reshape(num_imgs, -1, self.num_protos) coeff_pred_list.append(coeff_pred_per_level) coeff_pred = torch.cat(coeff_pred_list, dim=1) mask_pred_list = [] for idx in range(num_imgs): cur_prototypes = prototypes[idx] cur_coeff_pred = coeff_pred[idx] cur_bboxes = bboxes[idx] cur_img_meta = img_meta[idx] # Testing state if not is_train_or_val_workflow: bboxes_for_cropping = cur_bboxes else: cur_sampling_results = sampling_results[idx] pos_assigned_gt_inds = \ cur_sampling_results.pos_assigned_gt_inds bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone() pos_inds = cur_sampling_results.pos_inds cur_coeff_pred = cur_coeff_pred[pos_inds] # Linearly combine the prototypes with the mask coefficients mask_pred = cur_prototypes @ cur_coeff_pred.t() mask_pred = torch.sigmoid(mask_pred) h, w = cur_img_meta['img_shape'][:2] bboxes_for_cropping[:, 0] /= w bboxes_for_cropping[:, 1] /= h bboxes_for_cropping[:, 2] /= w bboxes_for_cropping[:, 3] /= h mask_pred = self.crop(mask_pred, bboxes_for_cropping) mask_pred = mask_pred.permute(2, 0, 1).contiguous() mask_pred_list.append(mask_pred) return mask_pred_list @force_fp32(apply_to=('mask_pred', )) def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results): """Compute loss of the head. Args: mask_pred (list[Tensor]): Predicted prototypes with shape (num_classes, H, W). gt_masks (list[Tensor]): Ground truth masks for each image with the same shape of the input image. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. img_meta (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. sampling_results (List[:obj:``SamplingResult``]): Sampler results for each image. Returns: dict[str, Tensor]: A dictionary of loss components. """ loss_mask = [] num_imgs = len(mask_pred) total_pos = 0 for idx in range(num_imgs): cur_mask_pred = mask_pred[idx] cur_gt_masks = gt_masks[idx].float() cur_gt_bboxes = gt_bboxes[idx] cur_img_meta = img_meta[idx] cur_sampling_results = sampling_results[idx] pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds num_pos = pos_assigned_gt_inds.size(0) # Since we're producing (near) full image masks, # it'd take too much vram to backprop on every single mask. # Thus we select only a subset. if num_pos > self.max_masks_to_train: perm = torch.randperm(num_pos) select = perm[:self.max_masks_to_train] cur_mask_pred = cur_mask_pred[select] pos_assigned_gt_inds = pos_assigned_gt_inds[select] num_pos = self.max_masks_to_train total_pos += num_pos gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds] mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks, pos_assigned_gt_inds) if num_pos == 0: loss = cur_mask_pred.sum() * 0. 
elif mask_targets is None: loss = F.binary_cross_entropy(cur_mask_pred, torch.zeros_like(cur_mask_pred), torch.zeros_like(cur_mask_pred)) else: cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1) loss = F.binary_cross_entropy( cur_mask_pred, mask_targets, reduction='none') * self.loss_mask_weight h, w = cur_img_meta['img_shape'][:2] gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] - gt_bboxes_for_reweight[:, 0]) / w gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] - gt_bboxes_for_reweight[:, 1]) / h loss = loss.mean(dim=(1, 2)) / gt_bboxes_width / gt_bboxes_height loss = torch.sum(loss) loss_mask.append(loss) if total_pos == 0: total_pos += 1 # avoid nan loss_mask = [x / total_pos for x in loss_mask] return dict(loss_mask=loss_mask) def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds): """Compute instance segmentation targets for each image. Args: mask_pred (Tensor): Predicted prototypes with shape (num_classes, H, W). gt_masks (Tensor): Ground truth masks for each image with the same shape of the input image. pos_assigned_gt_inds (Tensor): GT indices of the corresponding positive samples. Returns: Tensor: Instance segmentation targets with shape (num_instances, H, W). """ if gt_masks.size(0) == 0: return None mask_h, mask_w = mask_pred.shape[-2:] gt_masks = F.interpolate( gt_masks.unsqueeze(0), (mask_h, mask_w), mode='bilinear', align_corners=False).squeeze(0) gt_masks = gt_masks.gt(0.5).float() mask_targets = gt_masks[pos_assigned_gt_inds] return mask_targets def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale): """Resize, binarize, and format the instance mask predictions. Args: mask_pred (Tensor): shape (N, H, W). label_pred (Tensor): shape (N, ). img_meta (dict): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If rescale is False, then returned masks will fit the scale of imgs[0]. Returns: list[ndarray]: Mask predictions grouped by their predicted classes. """ ori_shape = img_meta['ori_shape'] scale_factor = img_meta['scale_factor'] if rescale: img_h, img_w = ori_shape[:2] else: img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32) img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32) cls_segms = [[] for _ in range(self.num_classes)] if mask_pred.size(0) == 0: return cls_segms mask_pred = F.interpolate( mask_pred.unsqueeze(0), (img_h, img_w), mode='bilinear', align_corners=False).squeeze(0) > 0.5 mask_pred = mask_pred.cpu().numpy().astype(np.uint8) for m, l in zip(mask_pred, label_pred): cls_segms[l].append(m) return cls_segms def crop(self, masks, boxes, padding=1): """Crop predicted masks by zeroing out everything not in the predicted bbox. Args: masks (Tensor): shape [H, W, N]. boxes (Tensor): bbox coords in relative point form with shape [N, 4]. Return: Tensor: The cropped masks. 
""" h, w, n = masks.size() x1, x2 = self.sanitize_coordinates( boxes[:, 0], boxes[:, 2], w, padding, cast=False) y1, y2 = self.sanitize_coordinates( boxes[:, 1], boxes[:, 3], h, padding, cast=False) rows = torch.arange( w, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(h, w, n) cols = torch.arange( h, device=masks.device, dtype=x1.dtype).view(-1, 1, 1).expand(h, w, n) masks_left = rows >= x1.view(1, 1, -1) masks_right = rows < x2.view(1, 1, -1) masks_up = cols >= y1.view(1, 1, -1) masks_down = cols < y2.view(1, 1, -1) crop_mask = masks_left * masks_right * masks_up * masks_down return masks * crop_mask.float() def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True): """Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, and x2 <= image_size. Also converts from relative to absolute coordinates and casts the results to long tensors. Warning: this does things in-place behind the scenes so copy if necessary. Args: _x1 (Tensor): shape (N, ). _x2 (Tensor): shape (N, ). img_size (int): Size of the input image. padding (int): x1 >= padding, x2 <= image_size-padding. cast (bool): If cast is false, the result won't be cast to longs. Returns: tuple: x1 (Tensor): Sanitized _x1. x2 (Tensor): Sanitized _x2. """ x1 = x1 * img_size x2 = x2 * img_size if cast: x1 = x1.long() x2 = x2.long() x1 = torch.min(x1, x2) x2 = torch.max(x1, x2) x1 = torch.clamp(x1 - padding, min=0) x2 = torch.clamp(x2 + padding, max=img_size) return x1, x2 def simple_test(self, feats, det_bboxes, det_labels, det_coeffs, img_metas, rescale=False): """Test function without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. det_bboxes (list[Tensor]): BBox results of each image. each element is (n, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. det_labels (list[Tensor]): BBox results of each image. each element is (n, ) tensor, each element represents the class label of the corresponding box. det_coeffs (list[Tensor]): BBox coefficient of each image. each element is (n, m) tensor, m is vector length. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[list]: encoded masks. The c-th item in the outer list corresponds to the c-th class. Given the c-th outer list, the i-th item in that inner list is the mask for the i-th box with class label c. """ num_imgs = len(img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): segm_results = [[[] for _ in range(self.num_classes)] for _ in range(num_imgs)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. 
if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] mask_preds = self.forward(feats[0], det_coeffs, _bboxes, img_metas) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append([[] for _ in range(self.num_classes)]) else: segm_result = self.get_seg_masks(mask_preds[i], det_labels[i], img_metas[i], rescale) segm_results.append(segm_result) return segm_results class InterpolateModule(BaseModule): """This is a module version of F.interpolate. Any arguments you give it just get passed along for the ride. """ def __init__(self, *args, init_cfg=None, **kwargs): super().__init__(init_cfg) self.args = args self.kwargs = kwargs def forward(self, x): """Forward features from the upstream network.""" return F.interpolate(x, *self.args, **self.kwargs)
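
# --- Illustrative sketch (added for exposition; not part of upstream YOLACT) --
# A minimal, self-contained demonstration of the mask assembly step performed in
# ``YOLACTProtonet.forward`` above: each instance mask is a linear combination of
# the shared prototypes, weighted by that box's predicted coefficients, followed
# by a sigmoid. All shapes and values here are made up purely for illustration.
def _yolact_mask_assembly_demo():
    import torch
    prototypes = torch.rand(138, 138, 32)   # (H, W, num_protos) from the protonet
    coeffs = torch.randn(5, 32)             # (num_boxes, num_protos), tanh-activated in practice
    masks = torch.sigmoid(prototypes @ coeffs.t())   # (H, W, num_boxes)
    return masks.permute(2, 0, 1).contiguous()       # (num_boxes, H, W)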
43,474
41.664377
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .anchor_free_head import AnchorFreeHead from .anchor_head import AnchorHead from .atss_head import ATSSHead from .autoassign_head import AutoAssignHead from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead from .centernet_head import CenterNetHead from .centripetal_head import CentripetalHead from .corner_head import CornerHead from .deformable_detr_head import DeformableDETRHead from .detr_head import DETRHead from .embedding_rpn_head import EmbeddingRPNHead from .fcos_head import FCOSHead from .fovea_head import FoveaHead from .free_anchor_retina_head import FreeAnchorRetinaHead from .fsaf_head import FSAFHead from .ga_retina_head import GARetinaHead from .ga_rpn_head import GARPNHead from .gfl_head import GFLHead from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead from .lad_head import LADHead from .ld_head import LDHead from .nasfcos_head import NASFCOSHead from .paa_head import PAAHead from .pisa_retinanet_head import PISARetinaHead from .pisa_ssd_head import PISASSDHead from .reppoints_head import RepPointsHead from .retina_head import RetinaHead from .retina_sepbn_head import RetinaSepBNHead from .rpn_head import RPNHead from .sabl_retina_head import SABLRetinaHead from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead from .ssd_head import SSDHead from .tood_head import TOODHead from .vfnet_head import VFNetHead from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead from .yolo_head import YOLOV3Head from .yolof_head import YOLOFHead from .yolox_head import YOLOXHead from .dsla_head import DSLAHead __all__ = [ 'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', 'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead', 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead', 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead', 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead', 'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead', 'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'DSLAHead' ]
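
# Example (illustrative only): any head listed above can be built from a config
# dict through the HEADS registry, e.g. via ``mmdet.models.build_head``. The
# exact config keys depend on the head; the sketch below assumes mmdet's default
# RetinaHead arguments.
#
#   from mmdet.models import build_head
#   head = build_head(dict(type='RetinaHead', num_classes=80, in_channels=256))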
2,422
43.054545
76
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/base_dense_head.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch from mmcv.cnn.utils.weight_init import constant_init from mmcv.ops import batched_nms from mmcv.runner import BaseModule, force_fp32 from mmdet.core.utils import filter_scores_and_topk, select_single_mlvl class BaseDenseHead(BaseModule, metaclass=ABCMeta): """Base class for DenseHeads.""" def __init__(self, init_cfg=None): super(BaseDenseHead, self).__init__(init_cfg) def init_weights(self): super(BaseDenseHead, self).init_weights() # avoid init_cfg overwrite the initialization of `conv_offset` for m in self.modules(): # DeformConv2dPack, ModulatedDeformConv2dPack if hasattr(m, 'conv_offset'): constant_init(m.conv_offset, 0) @abstractmethod def loss(self, **kwargs): """Compute losses of the head.""" pass @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def get_bboxes(self, cls_scores, bbox_preds, score_factors=None, img_metas=None, cfg=None, rescale=False, with_nms=True, **kwargs): """Transform network outputs of a batch into bbox results. Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Default None. img_metas (list[dict], Optional): Image meta info. Default None. cfg (mmcv.Config, Optional): Test / postprocessing configuration, if None, test_cfg would be used. Default None. rescale (bool): If True, return boxes in original image space. Default False. with_nms (bool): If True, do nms before return boxes. Default True. Returns: list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is a (n,) tensor where each item is the predicted class label of the corresponding box. """ assert len(cls_scores) == len(bbox_preds) if score_factors is None: # e.g. Retina, FreeAnchor, Foveabox, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, AutoAssign, etc. with_score_factors = True assert len(cls_scores) == len(score_factors) num_levels = len(cls_scores) featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device) result_list = [] for img_id in range(len(img_metas)): img_meta = img_metas[img_id] cls_score_list = select_single_mlvl(cls_scores, img_id) bbox_pred_list = select_single_mlvl(bbox_preds, img_id) if with_score_factors: score_factor_list = select_single_mlvl(score_factors, img_id) else: score_factor_list = [None for _ in range(num_levels)] results = self._get_bboxes_single(cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale, with_nms, **kwargs) result_list.append(results) return result_list def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. 
Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. """ if score_factor_list[0] is None: # e.g. Retina, FreeAnchor, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, etc. with_score_factors = True cfg = self.test_cfg if cfg is None else cfg img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] if with_score_factors: mlvl_score_factors = [] else: mlvl_score_factors = None for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) if with_score_factors: score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class scores = cls_score.softmax(-1)[:, :-1] # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. 
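            # ``filter_scores_and_topk`` thresholds by ``cfg.score_thr``, keeps at
            # most ``nms_pre`` candidates, and returns (scores, labels, keep_idxs,
            # filtered_results), where ``filtered_results`` holds the entries of
            # the dict passed in (``bbox_pred`` / ``priors`` here) gathered by
            # ``keep_idxs``.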
results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, keep_idxs, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] if with_score_factors: score_factor = score_factor[keep_idxs] bboxes = self.bbox_coder.decode( priors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) if with_score_factors: mlvl_score_factors.append(score_factor) return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale, with_nms, mlvl_score_factors, **kwargs) def _bbox_post_process(self, mlvl_scores, mlvl_labels, mlvl_bboxes, scale_factor, cfg, rescale=False, with_nms=True, mlvl_score_factors=None, **kwargs): """bbox post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Usually `with_nms` is False is used for aug test. Args: mlvl_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_bboxes, ). mlvl_labels (list[Tensor]): Box class labels from all scale levels of a single image, each item has shape (num_bboxes, ). mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale levels of a single image, each item has shape (num_bboxes, 4). scale_factor (ndarray, optional): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. mlvl_score_factors (list[Tensor], optional): Score factor from all scale levels of a single image, each item has shape (num_bboxes, ). Default: None. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. """ assert len(mlvl_scores) == len(mlvl_bboxes) == len(mlvl_labels) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) mlvl_labels = torch.cat(mlvl_labels) if mlvl_score_factors is not None: # TODO: Add sqrt operation in order to be consistent with # the paper. mlvl_score_factors = torch.cat(mlvl_score_factors) mlvl_scores = mlvl_scores * mlvl_score_factors if with_nms: if mlvl_bboxes.numel() == 0: det_bboxes = torch.cat([mlvl_bboxes, mlvl_scores[:, None]], -1) return det_bboxes, mlvl_labels det_bboxes, keep_idxs = batched_nms(mlvl_bboxes, mlvl_scores, mlvl_labels, cfg.nms) det_bboxes = det_bboxes[:cfg.max_per_img] det_labels = mlvl_labels[keep_idxs][:cfg.max_per_img] return det_bboxes, det_labels else: return mlvl_bboxes, mlvl_scores, mlvl_labels def forward_train(self, x, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=None, proposal_cfg=None, **kwargs): """ Args: x (list[Tensor]): Features from FPN. 
img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). proposal_cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used Returns: tuple: losses: (dict[str, Tensor]): A dictionary of loss components. proposal_list (list[Tensor]): Proposals of each image. """ outs = self(x) if gt_labels is None: loss_inputs = outs + (gt_bboxes, img_metas) else: loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) if proposal_cfg is None: return losses else: proposal_list = self.get_bboxes( *outs, img_metas=img_metas, cfg=proposal_cfg) return losses, proposal_list def simple_test(self, feats, img_metas, rescale=False): """Test function without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n, ). """ return self.simple_test_bboxes(feats, img_metas, rescale=rescale) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def onnx_export(self, cls_scores, bbox_preds, score_factors=None, img_metas=None, with_nms=True): """Transform network output for a batch into bbox predictions. Args: cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_points * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_points * 4, H, W). score_factors (list[Tensor]): score_factors for each s cale level with shape (N, num_points * 1, H, W). Default: None. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. Default: None. with_nms (bool): Whether apply nms to the bboxes. Default: True. Returns: tuple[Tensor, Tensor] | list[tuple]: When `with_nms` is True, it is tuple[Tensor, Tensor], first tensor bboxes with shape [N, num_det, 5], 5 arrange as (x1, y1, x2, y2, score) and second element is class labels of shape [N, num_det]. When `with_nms` is False, first tensor is bboxes with shape [N, num_det, 4], second tensor is raw score has shape [N, num_det, num_classes]. """ assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)] mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)] assert len( img_metas ) == 1, 'Only support one input image while in exporting to ONNX' img_shape = img_metas[0]['img_shape_for_onnx'] cfg = self.test_cfg assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors) device = cls_scores[0].device batch_size = cls_scores[0].shape[0] # convert to tensor to keep tracing nms_pre_tensor = torch.tensor( cfg.get('nms_pre', -1), device=device, dtype=torch.long) # e.g. Retina, FreeAnchor, etc. 
if score_factors is None: with_score_factors = False mlvl_score_factor = [None for _ in range(num_levels)] else: # e.g. FCOS, PAA, ATSS, etc. with_score_factors = True mlvl_score_factor = [ score_factors[i].detach() for i in range(num_levels) ] mlvl_score_factors = [] mlvl_batch_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred, score_factors, priors in zip( mlvl_cls_scores, mlvl_bbox_preds, mlvl_score_factor, mlvl_priors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1, self.cls_out_channels) if self.use_sigmoid_cls: scores = scores.sigmoid() nms_pre_score = scores else: scores = scores.softmax(-1) nms_pre_score = scores if with_score_factors: score_factors = score_factors.permute(0, 2, 3, 1).reshape( batch_size, -1).sigmoid() bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4) priors = priors.expand(batch_size, -1, priors.size(-1)) # Get top-k predictions from mmdet.core.export import get_k_for_topk nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) if nms_pre > 0: if with_score_factors: nms_pre_score = (nms_pre_score * score_factors[..., None]) else: nms_pre_score = nms_pre_score # Get maximum scores for foreground classes. if self.use_sigmoid_cls: max_scores, _ = nms_pre_score.max(-1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = nms_pre_score[..., :-1].max(-1) _, topk_inds = max_scores.topk(nms_pre) batch_inds = torch.arange( batch_size, device=bbox_pred.device).view( -1, 1).expand_as(topk_inds).long() # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 transformed_inds = bbox_pred.shape[1] * batch_inds + topk_inds priors = priors.reshape( -1, priors.size(-1))[transformed_inds, :].reshape( batch_size, -1, priors.size(-1)) bbox_pred = bbox_pred.reshape(-1, 4)[transformed_inds, :].reshape( batch_size, -1, 4) scores = scores.reshape( -1, self.cls_out_channels)[transformed_inds, :].reshape( batch_size, -1, self.cls_out_channels) if with_score_factors: score_factors = score_factors.reshape( -1, 1)[transformed_inds].reshape(batch_size, -1) bboxes = self.bbox_coder.decode( priors, bbox_pred, max_shape=img_shape) mlvl_batch_bboxes.append(bboxes) mlvl_scores.append(scores) if with_score_factors: mlvl_score_factors.append(score_factors) batch_bboxes = torch.cat(mlvl_batch_bboxes, dim=1) batch_scores = torch.cat(mlvl_scores, dim=1) if with_score_factors: batch_score_factors = torch.cat(mlvl_score_factors, dim=1) # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment from mmdet.core.export import add_dummy_nms_for_onnx if not self.use_sigmoid_cls: batch_scores = batch_scores[..., :self.num_classes] if with_score_factors: batch_scores = batch_scores * (batch_score_factors.unsqueeze(2)) if with_nms: max_output_boxes_per_class = cfg.nms.get( 'max_output_boxes_per_class', 200) iou_threshold = cfg.nms.get('iou_threshold', 0.5) score_threshold = cfg.score_thr nms_pre = cfg.get('deploy_nms_pre', -1) return add_dummy_nms_for_onnx(batch_bboxes, batch_scores, max_output_boxes_per_class, iou_threshold, score_threshold, nms_pre, cfg.max_per_img) else: return batch_bboxes, batch_scores
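
# --- Illustrative sketch (added for exposition; not part of upstream mmdet) ---
# A rough, standalone version of what ``_bbox_post_process`` above does once the
# per-level predictions have been decoded: concatenate levels, optionally rescale
# back to the original image scale, then run class-aware batched NMS. The tensors
# and the NMS config are made up for illustration; the real method also handles
# score factors and empty inputs.
def _post_process_demo(max_per_img=100):
    import torch
    from mmcv.ops import batched_nms
    # fabricate two "levels" of valid (x1, y1, x2, y2) boxes with scores/labels
    xy = torch.rand(15, 2) * 100
    wh = torch.rand(15, 2) * 20 + 1
    bboxes = torch.cat([xy, xy + wh], dim=1)
    scores = torch.rand(15)
    labels = torch.randint(0, 80, (15, ))
    # rescale to the original image scale, as done when rescale=True
    scale_factor = bboxes.new_tensor([2.0, 2.0, 2.0, 2.0])
    bboxes = bboxes / scale_factor
    det_bboxes, keep_idxs = batched_nms(bboxes, scores, labels,
                                        dict(type='nms', iou_threshold=0.5))
    return det_bboxes[:max_per_img], labels[keep_idxs][:max_per_img]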
23,226
43.074004
106
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/free_anchor_retina_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn.functional as F from mmdet.core import bbox_overlaps from ..builder import HEADS from .retina_head import RetinaHead EPS = 1e-12 @HEADS.register_module() class FreeAnchorRetinaHead(RetinaHead): """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Default: 4. conv_cfg (dict): dictionary to construct and config conv layer. Default: None. norm_cfg (dict): dictionary to construct and config norm layer. Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). pre_anchor_topk (int): Number of boxes that be token in each bag. bbox_thr (float): The threshold of the saturated linear function. It is usually the same with the IoU threshold used in NMS. gamma (float): Gamma parameter in focal loss. alpha (float): Alpha parameter in focal loss. """ # noqa: W605 def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, pre_anchor_topk=50, bbox_thr=0.6, gamma=2.0, alpha=0.5, **kwargs): super(FreeAnchorRetinaHead, self).__init__(num_classes, in_channels, stacked_convs, conv_cfg, norm_cfg, **kwargs) self.pre_anchor_topk = pre_anchor_topk self.bbox_thr = bbox_thr self.gamma = gamma self.alpha = alpha def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels anchor_list, _ = self.get_anchors(featmap_sizes, img_metas) anchors = [torch.cat(anchor) for anchor in anchor_list] # concatenate each level cls_scores = [ cls.permute(0, 2, 3, 1).reshape(cls.size(0), -1, self.cls_out_channels) for cls in cls_scores ] bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4) for bbox_pred in bbox_preds ] cls_scores = torch.cat(cls_scores, dim=1) bbox_preds = torch.cat(bbox_preds, dim=1) cls_prob = torch.sigmoid(cls_scores) box_prob = [] num_pos = 0 positive_losses = [] for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_, bbox_preds_) in enumerate( zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)): with torch.no_grad(): if len(gt_bboxes_) == 0: image_box_prob = torch.zeros( anchors_.size(0), self.cls_out_channels).type_as(bbox_preds_) else: # box_localization: a_{j}^{loc}, shape: [j, 4] pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_) # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes) # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] t1 = self.bbox_thr t2 = object_box_iou.max( dim=1, keepdim=True).values.clamp(min=t1 + 1e-12) object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp( min=0, max=1) # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] num_obj = gt_labels_.size(0) indices = torch.stack([ torch.arange(num_obj).type_as(gt_labels_), gt_labels_ ], dim=0) object_cls_box_prob = torch.sparse_coo_tensor( indices, object_box_prob) # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] """ from "start" to "end" implement: image_box_iou = torch.sparse.max(object_cls_box_prob, dim=0).t() """ # start box_cls_prob = torch.sparse.sum( object_cls_box_prob, dim=0).to_dense() indices = torch.nonzero(box_cls_prob, as_tuple=False).t_() if indices.numel() == 0: image_box_prob = torch.zeros( anchors_.size(0), self.cls_out_channels).type_as(object_box_prob) else: nonzero_box_prob = torch.where( (gt_labels_.unsqueeze(dim=-1) == indices[0]), object_box_prob[:, indices[1]], torch.tensor([ 0 ]).type_as(object_box_prob)).max(dim=0).values # upmap to shape [j, c] image_box_prob = torch.sparse_coo_tensor( indices.flip([0]), nonzero_box_prob, size=(anchors_.size(0), self.cls_out_channels)).to_dense() # end box_prob.append(image_box_prob) # construct bags for objects match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_) _, matched = torch.topk( match_quality_matrix, self.pre_anchor_topk, dim=1, sorted=False) del match_quality_matrix # matched_cls_prob: P_{ij}^{cls} matched_cls_prob = torch.gather( cls_prob_[matched], 2, gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, 1)).squeeze(2) # matched_box_prob: P_{ij}^{loc} matched_anchors = anchors_[matched] matched_object_targets = self.bbox_coder.encode( matched_anchors, gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors)) loss_bbox = self.loss_bbox( bbox_preds_[matched], matched_object_targets, reduction_override='none').sum(-1) matched_box_prob = torch.exp(-loss_bbox) # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )} num_pos += len(gt_bboxes_) positive_losses.append( self.positive_bag_loss(matched_cls_prob, matched_box_prob)) positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) # box_prob: P{a_{j} \in A_{+}} box_prob = torch.stack(box_prob, dim=0) # negative_loss: # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| negative_loss = 
self.negative_bag_loss(cls_prob, box_prob).sum() / max( 1, num_pos * self.pre_anchor_topk) # avoid the absence of gradients in regression subnet # when no ground-truth in a batch if num_pos == 0: positive_loss = bbox_preds.sum() * 0 losses = { 'positive_bag_loss': positive_loss, 'negative_bag_loss': negative_loss } return losses def positive_bag_loss(self, matched_cls_prob, matched_box_prob): """Compute positive bag loss. :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`. :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples. :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples. Args: matched_cls_prob (Tensor): Classification probability of matched samples in shape (num_gt, pre_anchor_topk). matched_box_prob (Tensor): BBox probability of matched samples, in shape (num_gt, pre_anchor_topk). Returns: Tensor: Positive bag loss in shape (num_gt,). """ # noqa: E501, W605 # bag_prob = Mean-max(matched_prob) matched_prob = matched_cls_prob * matched_box_prob weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None) weight /= weight.sum(dim=1).unsqueeze(dim=-1) bag_prob = (weight * matched_prob).sum(dim=1) # positive_bag_loss = -self.alpha * log(bag_prob) return self.alpha * F.binary_cross_entropy( bag_prob, torch.ones_like(bag_prob), reduction='none') def negative_bag_loss(self, cls_prob, box_prob): """Compute negative bag loss. :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`. :math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples. :math:`P_{j}^{bg}`: Classification probability of negative samples. Args: cls_prob (Tensor): Classification probability, in shape (num_img, num_anchors, num_classes). box_prob (Tensor): Box probability, in shape (num_img, num_anchors, num_classes). Returns: Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes). """ # noqa: E501, W605 prob = cls_prob * (1 - box_prob) # There are some cases when neg_prob = 0. # This will cause the neg_prob.log() to be inf without clamp. prob = prob.clamp(min=EPS, max=1 - EPS) negative_bag_loss = prob**self.gamma * F.binary_cross_entropy( prob, torch.zeros_like(prob), reduction='none') return (1 - self.alpha) * negative_bag_loss
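
# --- Illustrative sketch (added for exposition; not part of upstream mmdet) ---
# A standalone toy version of ``positive_bag_loss`` above: for each ground-truth
# box, the matched anchors form a "bag", and the bag probability is a soft
# (mean-max) aggregation of cls_prob * loc_prob over that bag. Values below are
# random and only meant to show the tensor shapes involved.
def _positive_bag_loss_demo(alpha=0.5):
    import torch
    import torch.nn.functional as F
    matched_prob = torch.rand(3, 50)                    # (num_gt, pre_anchor_topk)
    weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)
    weight = weight / weight.sum(dim=1, keepdim=True)   # mean-max weights
    bag_prob = (weight * matched_prob).sum(dim=1)       # (num_gt, )
    return alpha * F.binary_cross_entropy(
        bag_prob, torch.ones_like(bag_prob), reduction='none')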
11,189
40.139706
94
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/guided_anchor_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn from mmcv.ops import DeformConv2d, MaskedConv2d from mmcv.runner import BaseModule, force_fp32 from mmdet.core import (anchor_inside_flags, build_assigner, build_bbox_coder, build_prior_generator, build_sampler, calc_region, images_to_levels, multi_apply, multiclass_nms, unmap) from ..builder import HEADS, build_loss from .anchor_head import AnchorHead class FeatureAdaption(BaseModule): """Feature Adaption Module. Feature Adaption Module is implemented based on DCN v1. It uses anchor shape prediction rather than feature map to predict offsets of deform conv layer. Args: in_channels (int): Number of channels in the input feature map. out_channels (int): Number of channels in the output feature map. kernel_size (int): Deformable conv kernel size. deform_groups (int): Deformable conv group size. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels, out_channels, kernel_size=3, deform_groups=4, init_cfg=dict( type='Normal', layer='Conv2d', std=0.1, override=dict( type='Normal', name='conv_adaption', std=0.01))): super(FeatureAdaption, self).__init__(init_cfg) offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 2, deform_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv2d( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deform_groups=deform_groups) self.relu = nn.ReLU(inplace=True) def forward(self, x, shape): offset = self.conv_offset(shape.detach()) x = self.relu(self.conv_adaption(x, offset)) return x @HEADS.register_module() class GuidedAnchorHead(AnchorHead): """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.). This GuidedAnchorHead will predict high-quality feature guided anchors and locations where anchors will be kept in inference. There are mainly 3 categories of bounding-boxes. - Sampled 9 pairs for target assignment. (approxes) - The square boxes where the predicted anchors are based on. (squares) - Guided anchors. Please refer to https://arxiv.org/abs/1901.03278 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. approx_anchor_generator (dict): Config dict for approx generator square_anchor_generator (dict): Config dict for square generator anchor_coder (dict): Config dict for anchor coder bbox_coder (dict): Config dict for bbox coder reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. deform_groups: (int): Group number of DCN in FeatureAdaption module. loc_filter_thr (float): Threshold to filter out unconcerned regions. loss_loc (dict): Config of location loss. loss_shape (dict): Config of anchor shape loss. loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of bbox regression loss. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__( self, num_classes, in_channels, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[8], strides=[4, 8, 16, 32, 64]), anchor_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0] ), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0] ), reg_decoded_bbox=False, deform_groups=4, loc_filter_thr=0.01, train_cfg=None, test_cfg=None, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0), init_cfg=dict(type='Normal', layer='Conv2d', std=0.01, override=dict(type='Normal', name='conv_loc', std=0.01, bias_prob=0.01))): # yapf: disable super(AnchorHead, self).__init__(init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.deform_groups = deform_groups self.loc_filter_thr = loc_filter_thr # build approx_anchor_generator and square_anchor_generator assert (approx_anchor_generator['octave_base_scale'] == square_anchor_generator['scales'][0]) assert (approx_anchor_generator['strides'] == square_anchor_generator['strides']) self.approx_anchor_generator = build_prior_generator( approx_anchor_generator) self.square_anchor_generator = build_prior_generator( square_anchor_generator) self.approxs_per_octave = self.approx_anchor_generator \ .num_base_priors[0] self.reg_decoded_bbox = reg_decoded_bbox # one anchor per location self.num_base_priors = self.square_anchor_generator.num_base_priors[0] self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.loc_focal_loss = loss_loc['type'] in ['FocalLoss'] self.sampling = loss_cls['type'] not in ['FocalLoss'] self.ga_sampling = train_cfg is not None and hasattr( train_cfg, 'ga_sampler') if self.use_sigmoid_cls: self.cls_out_channels = self.num_classes else: self.cls_out_channels = self.num_classes + 1 # build bbox_coder self.anchor_coder = build_bbox_coder(anchor_coder) self.bbox_coder = build_bbox_coder(bbox_coder) # build losses self.loss_loc = build_loss(loss_loc) self.loss_shape = build_loss(loss_shape) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.ga_assigner = build_assigner(self.train_cfg.ga_assigner) if self.ga_sampling: ga_sampler_cfg = self.train_cfg.ga_sampler else: ga_sampler_cfg = dict(type='PseudoSampler') self.ga_sampler = build_sampler(ga_sampler_cfg, context=self) self.fp16_enabled = False self._init_layers() @property def num_anchors(self): warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.square_anchor_generator.num_base_priors[0] def _init_layers(self): self.relu = nn.ReLU(inplace=True) self.conv_loc = nn.Conv2d(self.in_channels, 1, 1) self.conv_shape = 
nn.Conv2d(self.in_channels, self.num_base_priors * 2, 1) self.feature_adaption = FeatureAdaption( self.in_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.conv_cls = MaskedConv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 1) self.conv_reg = MaskedConv2d(self.feat_channels, self.num_base_priors * 4, 1) def forward_single(self, x): loc_pred = self.conv_loc(x) shape_pred = self.conv_shape(x) x = self.feature_adaption(x, shape_pred) # masked conv is only used during inference for speed-up if not self.training: mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr else: mask = None cls_score = self.conv_cls(x, mask) bbox_pred = self.conv_reg(x, mask) return cls_score, bbox_pred, shape_pred, loc_pred def forward(self, feats): return multi_apply(self.forward_single, feats) def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'): """Get sampled approxs and inside flags according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): device for returned tensors Returns: tuple: approxes of each image, inside flags of each image """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # approxes for one time multi_level_approxs = self.approx_anchor_generator.grid_priors( featmap_sizes, device=device) approxs_list = [multi_level_approxs for _ in range(num_imgs)] # for each image, we compute inside flags of multi level approxes inside_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = [] multi_level_approxs = approxs_list[img_id] # obtain valid flags for each approx first multi_level_approx_flags = self.approx_anchor_generator \ .valid_flags(featmap_sizes, img_meta['pad_shape'], device=device) for i, flags in enumerate(multi_level_approx_flags): approxs = multi_level_approxs[i] inside_flags_list = [] for i in range(self.approxs_per_octave): split_valid_flags = flags[i::self.approxs_per_octave] split_approxs = approxs[i::self.approxs_per_octave, :] inside_flags = anchor_inside_flags( split_approxs, split_valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) inside_flags_list.append(inside_flags) # inside_flag for a position is true if any anchor in this # position is true inside_flags = ( torch.stack(inside_flags_list, 0).sum(dim=0) > 0) multi_level_flags.append(inside_flags) inside_flag_list.append(multi_level_flags) return approxs_list, inside_flag_list def get_anchors(self, featmap_sizes, shape_preds, loc_preds, img_metas, use_loc_filter=False, device='cuda'): """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. shape_preds (list[tensor]): Multi-level shape predictions. loc_preds (list[tensor]): Multi-level location predictions. img_metas (list[dict]): Image meta info. use_loc_filter (bool): Use loc filter or not. 
device (torch.device | str): device for returned tensors Returns: tuple: square approxs of each image, guided anchors of each image, loc masks of each image """ num_imgs = len(img_metas) num_levels = len(featmap_sizes) # since feature map sizes of all images are the same, we only compute # squares for one time multi_level_squares = self.square_anchor_generator.grid_priors( featmap_sizes, device=device) squares_list = [multi_level_squares for _ in range(num_imgs)] # for each image, we compute multi level guided anchors guided_anchors_list = [] loc_mask_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_guided_anchors = [] multi_level_loc_mask = [] for i in range(num_levels): squares = squares_list[img_id][i] shape_pred = shape_preds[i][img_id] loc_pred = loc_preds[i][img_id] guided_anchors, loc_mask = self._get_guided_anchors_single( squares, shape_pred, loc_pred, use_loc_filter=use_loc_filter) multi_level_guided_anchors.append(guided_anchors) multi_level_loc_mask.append(loc_mask) guided_anchors_list.append(multi_level_guided_anchors) loc_mask_list.append(multi_level_loc_mask) return squares_list, guided_anchors_list, loc_mask_list def _get_guided_anchors_single(self, squares, shape_pred, loc_pred, use_loc_filter=False): """Get guided anchors and loc masks for a single level. Args: square (tensor): Squares of a single level. shape_pred (tensor): Shape predictions of a single level. loc_pred (tensor): Loc predictions of a single level. use_loc_filter (list[tensor]): Use loc filter or not. Returns: tuple: guided anchors, location masks """ # calculate location filtering mask loc_pred = loc_pred.sigmoid().detach() if use_loc_filter: loc_mask = loc_pred >= self.loc_filter_thr else: loc_mask = loc_pred >= 0.0 mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_base_priors) mask = mask.contiguous().view(-1) # calculate guided anchors squares = squares[mask] anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view( -1, 2).detach()[mask] bbox_deltas = anchor_deltas.new_full(squares.size(), 0) bbox_deltas[:, 2:] = anchor_deltas guided_anchors = self.anchor_coder.decode( squares, bbox_deltas, wh_ratio_clip=1e-6) return guided_anchors, mask def ga_loc_targets(self, gt_bboxes_list, featmap_sizes): """Compute location targets for guided anchoring. Each feature map is divided into positive, negative and ignore regions. - positive regions: target 1, weight 1 - ignore regions: target 0, weight 0 - negative regions: target 0, weight 0.1 Args: gt_bboxes_list (list[Tensor]): Gt bboxes of each image. featmap_sizes (list[tuple]): Multi level sizes of each feature maps. Returns: tuple """ anchor_scale = self.approx_anchor_generator.octave_base_scale anchor_strides = self.approx_anchor_generator.strides # Currently only supports same stride in x and y direction. 
for stride in anchor_strides: assert (stride[0] == stride[1]) anchor_strides = [stride[0] for stride in anchor_strides] center_ratio = self.train_cfg.center_ratio ignore_ratio = self.train_cfg.ignore_ratio img_per_gpu = len(gt_bboxes_list) num_lvls = len(featmap_sizes) r1 = (1 - center_ratio) / 2 r2 = (1 - ignore_ratio) / 2 all_loc_targets = [] all_loc_weights = [] all_ignore_map = [] for lvl_id in range(num_lvls): h, w = featmap_sizes[lvl_id] loc_targets = torch.zeros( img_per_gpu, 1, h, w, device=gt_bboxes_list[0].device, dtype=torch.float32) loc_weights = torch.full_like(loc_targets, -1) ignore_map = torch.zeros_like(loc_targets) all_loc_targets.append(loc_targets) all_loc_weights.append(loc_weights) all_ignore_map.append(ignore_map) for img_id in range(img_per_gpu): gt_bboxes = gt_bboxes_list[img_id] scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) # assign gt bboxes to different feature levels w.r.t. their scales target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() for gt_id in range(gt_bboxes.size(0)): lvl = target_lvls[gt_id].item() # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] # calculate ignore regions ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[lvl]) # calculate positive (center) regions ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( gt_, r1, featmap_sizes[lvl]) all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 0 all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 # calculate ignore map on nearby low level feature if lvl > 0: d_lvl = lvl - 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[d_lvl]) all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 # calculate ignore map on nearby high level feature if lvl < num_lvls - 1: u_lvl = lvl + 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[u_lvl]) all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 for lvl_id in range(num_lvls): # ignore negative regions w.r.t. ignore map all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) & (all_ignore_map[lvl_id] > 0)] = 0 # set negative regions with weight 0.1 all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 # loc average factor to balance loss loc_avg_factor = sum( [t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200 return all_loc_targets, all_loc_weights, loc_avg_factor def _ga_shape_target_single(self, flat_approxs, inside_flags, flat_squares, gt_bboxes, gt_bboxes_ignore, img_meta, unmap_outputs=True): """Compute guided anchoring targets. This function returns sampled anchors and gt bboxes directly rather than calculates regression targets. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_bboxes (Tensor): Ground truth bboxes of a single image. 
img_meta (dict): Meta info of a single image. approxs_per_octave (int): number of approxs per octave cfg (dict): RPN train configs. unmap_outputs (bool): unmap outputs or not. Returns: tuple """ if not inside_flags.any(): return (None, ) * 5 # assign gt and sample anchors expand_inside_flags = inside_flags[:, None].expand( -1, self.approxs_per_octave).reshape(-1) approxs = flat_approxs[expand_inside_flags, :] squares = flat_squares[inside_flags, :] assign_result = self.ga_assigner.assign(approxs, squares, self.approxs_per_octave, gt_bboxes, gt_bboxes_ignore) sampling_result = self.ga_sampler.sample(assign_result, squares, gt_bboxes) bbox_anchors = torch.zeros_like(squares) bbox_gts = torch.zeros_like(squares) bbox_weights = torch.zeros_like(squares) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes bbox_weights[pos_inds, :] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_squares.size(0) bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags) bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds) def ga_shape_targets(self, approx_list, inside_flag_list, square_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, unmap_outputs=True): """Compute guided anchoring targets. Args: approx_list (list[list]): Multi level approxs of each image. inside_flag_list (list[list]): Multi level inside flags of each image. square_list (list[list]): Multi level squares of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. unmap_outputs (bool): unmap outputs or not. Returns: tuple """ num_imgs = len(img_metas) assert len(approx_list) == len(inside_flag_list) == len( square_list) == num_imgs # anchor number of multi levels num_level_squares = [squares.size(0) for squares in square_list[0]] # concat all level anchors and flags to a single tensor inside_flag_flat_list = [] approx_flat_list = [] square_flat_list = [] for i in range(num_imgs): assert len(square_list[i]) == len(inside_flag_list[i]) inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) approx_flat_list.append(torch.cat(approx_list[i])) square_flat_list.append(torch.cat(square_list[i])) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._ga_shape_target_single, approx_flat_list, inside_flag_flat_list, square_flat_list, gt_bboxes_list, gt_bboxes_ignore_list, img_metas, unmap_outputs=unmap_outputs) # no valid anchors if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. 
multiple levels bbox_anchors_list = images_to_levels(all_bbox_anchors, num_level_squares) bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_squares) return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, num_total_pos, num_total_neg) def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts, anchor_weights, anchor_total_num): shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2) bbox_anchors = bbox_anchors.contiguous().view(-1, 4) bbox_gts = bbox_gts.contiguous().view(-1, 4) anchor_weights = anchor_weights.contiguous().view(-1, 4) bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0) bbox_deltas[:, 2:] += shape_pred # filter out negative samples to speed-up weighted_bounded_iou_loss inds = torch.nonzero( anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1) bbox_deltas_ = bbox_deltas[inds] bbox_anchors_ = bbox_anchors[inds] bbox_gts_ = bbox_gts[inds] anchor_weights_ = anchor_weights[inds] pred_anchors_ = self.anchor_coder.decode( bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6) loss_shape = self.loss_shape( pred_anchors_, bbox_gts_, anchor_weights_, avg_factor=anchor_total_num) return loss_shape def loss_loc_single(self, loc_pred, loc_target, loc_weight, loc_avg_factor): loss_loc = self.loss_loc( loc_pred.reshape(-1, 1), loc_target.reshape(-1).long(), loc_weight.reshape(-1), avg_factor=loc_avg_factor) return loss_loc @force_fp32( apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) def loss(self, cls_scores, bbox_preds, shape_preds, loc_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.approx_anchor_generator.num_levels device = cls_scores[0].device # get loc targets loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets( gt_bboxes, featmap_sizes) # get sampled approxes approxs_list, inside_flag_list = self.get_sampled_approxs( featmap_sizes, img_metas, device=device) # get squares and guided anchors squares_list, guided_anchors_list, _ = self.get_anchors( featmap_sizes, shape_preds, loc_preds, img_metas, device=device) # get shape targets shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list, squares_list, gt_bboxes, img_metas) if shape_targets is None: return None (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num, anchor_bg_num) = shape_targets anchor_total_num = ( anchor_fg_num if not self.ga_sampling else anchor_fg_num + anchor_bg_num) # get anchor targets label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( guided_anchors_list, inside_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [ anchors.size(0) for anchors in guided_anchors_list[0] ] # concat all level anchors to a single tensor concat_anchor_list = [] for i in range(len(guided_anchors_list)): concat_anchor_list.append(torch.cat(guided_anchors_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) # get classification and bbox regression losses losses_cls, losses_bbox = multi_apply( 
self.loss_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) # get anchor location loss losses_loc = [] for i in range(len(loc_preds)): loss_loc = self.loss_loc_single( loc_preds[i], loc_targets[i], loc_weights[i], loc_avg_factor=loc_avg_factor) losses_loc.append(loss_loc) # get anchor shape loss losses_shape = [] for i in range(len(shape_preds)): loss_shape = self.loss_shape_single( shape_preds[i], bbox_anchors_list[i], bbox_gts_list[i], anchor_weights_list[i], anchor_total_num=anchor_total_num) losses_shape.append(loss_shape) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_shape=losses_shape, loss_loc=losses_loc) @force_fp32( apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) def get_bboxes(self, cls_scores, bbox_preds, shape_preds, loc_preds, img_metas, cfg=None, rescale=False): assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len( loc_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device # get guided anchors _, guided_anchors, loc_masks = self.get_anchors( featmap_sizes, shape_preds, loc_preds, img_metas, use_loc_filter=not self.training, device=device) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] guided_anchor_list = [ guided_anchors[img_id][i].detach() for i in range(num_levels) ] loc_mask_list = [ loc_masks[img_id][i].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, guided_anchor_list, loc_mask_list, img_shape, scale_factor, cfg, rescale) result_list.append(proposals) return result_list def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, mlvl_masks, img_shape, scale_factor, cfg, rescale=False): cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds, mlvl_anchors, mlvl_masks): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] # if no location is kept, end. if mask.sum() == 0: continue # reshape scores and bbox_pred cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) # filter scores, bbox_pred w.r.t. mask. # anchors are filtered in get_anchors() beforehand. scores = scores[mask, :] bbox_pred = bbox_pred[mask, :] if scores.dim() == 0: anchors = anchors.unsqueeze(0) scores = scores.unsqueeze(0) bbox_pred = bbox_pred.unsqueeze(0) # filter anchors, bbox_pred, scores w.r.t. 
scores nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = scores[:, :-1].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] bboxes = self.bbox_coder.decode( anchors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) if self.use_sigmoid_cls: # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) # multi class NMS det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels
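# --- illustrative appendix (not part of the original file) -------------------
# A minimal, self-contained sketch of how the guided anchors above are formed:
# the square base anchors keep their centers and only their width/height are
# rescaled by the predicted (dw, dh) shape deltas, mirroring what
# _get_guided_anchors_single() does through anchor_coder.decode() with zero
# means and unit stds. Plain PyTorch only; names and toy values here are ours.
import torch


def decode_guided_anchors(squares, shape_deltas):
    """squares: (N, 4) boxes in (x1, y1, x2, y2); shape_deltas: (N, 2) = (dw, dh)."""
    cx = (squares[:, 0] + squares[:, 2]) * 0.5
    cy = (squares[:, 1] + squares[:, 3]) * 0.5
    w = squares[:, 2] - squares[:, 0]
    h = squares[:, 3] - squares[:, 1]
    # the regressed deltas live in log-scale; exp() turns them into scale factors
    new_w = w * shape_deltas[:, 0].exp()
    new_h = h * shape_deltas[:, 1].exp()
    return torch.stack(
        [cx - new_w * 0.5, cy - new_h * 0.5, cx + new_w * 0.5, cy + new_h * 0.5],
        dim=-1)


# toy check: an 8x8 square whose width grows by exp(0.5) while height stays fixed
_squares = torch.tensor([[4.0, 4.0, 12.0, 12.0]])
_deltas = torch.tensor([[0.5, 0.0]])
print(decode_guided_anchors(_squares, _deltas))
# -----------------------------------------------------------------------------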
37334
41.963176
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/yolof_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm, normal_init) from mmcv.runner import force_fp32 from mmdet.core import anchor_inside_flags, multi_apply, reduce_mean, unmap from ..builder import HEADS from .anchor_head import AnchorHead INF = 1e8 def levels_to_images(mlvl_tensor): """Concat multi-level feature maps by image. [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] Convert the shape of each element in mlvl_tensor from (N, C, H, W) to (N, H*W , C), then split the element to N elements with shape (H*W, C), and concat elements in same image of all level along first dimension. Args: mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from corresponding level. Each element is of shape (N, C, H, W) Returns: list[torch.Tensor]: A list that contains N tensors and each tensor is of shape (num_elements, C) """ batch_size = mlvl_tensor[0].size(0) batch_list = [[] for _ in range(batch_size)] channels = mlvl_tensor[0].size(1) for t in mlvl_tensor: t = t.permute(0, 2, 3, 1) t = t.view(batch_size, -1, channels).contiguous() for img in range(batch_size): batch_list[img].append(t[img]) return [torch.cat(item, 0) for item in batch_list] @HEADS.register_module() class YOLOFHead(AnchorHead): """YOLOFHead Paper link: https://arxiv.org/abs/2103.09460. Args: num_classes (int): The number of object classes (w/o background) in_channels (List[int]): The number of input channels per scale. cls_num_convs (int): The number of convolutions of cls branch. Default 2. reg_num_convs (int): The number of convolutions of reg branch. Default 4. norm_cfg (dict): Dictionary to construct and config norm layer. """ def __init__(self, num_classes, in_channels, num_cls_convs=2, num_reg_convs=4, norm_cfg=dict(type='BN', requires_grad=True), **kwargs): self.num_cls_convs = num_cls_convs self.num_reg_convs = num_reg_convs self.norm_cfg = norm_cfg super(YOLOFHead, self).__init__(num_classes, in_channels, **kwargs) def _init_layers(self): cls_subnet = [] bbox_subnet = [] for i in range(self.num_cls_convs): cls_subnet.append( ConvModule( self.in_channels, self.in_channels, kernel_size=3, padding=1, norm_cfg=self.norm_cfg)) for i in range(self.num_reg_convs): bbox_subnet.append( ConvModule( self.in_channels, self.in_channels, kernel_size=3, padding=1, norm_cfg=self.norm_cfg)) self.cls_subnet = nn.Sequential(*cls_subnet) self.bbox_subnet = nn.Sequential(*bbox_subnet) self.cls_score = nn.Conv2d( self.in_channels, self.num_base_priors * self.num_classes, kernel_size=3, stride=1, padding=1) self.bbox_pred = nn.Conv2d( self.in_channels, self.num_base_priors * 4, kernel_size=3, stride=1, padding=1) self.object_pred = nn.Conv2d( self.in_channels, self.num_base_priors, kernel_size=3, stride=1, padding=1) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) # Use prior in model initialization to improve stability bias_cls = bias_init_with_prob(0.01) torch.nn.init.constant_(self.cls_score.bias, bias_cls) def forward_single(self, feature): cls_score = self.cls_score(self.cls_subnet(feature)) N, _, H, W = cls_score.shape cls_score = cls_score.view(N, -1, self.num_classes, H, W) reg_feat = self.bbox_subnet(feature) bbox_reg = self.bbox_pred(reg_feat) objectness = self.object_pred(reg_feat) # implicit objectness objectness = objectness.view(N, -1, 1, H, W) normalized_cls_score = cls_score + objectness - 
torch.log( 1. + torch.clamp(cls_score.exp(), max=INF) + torch.clamp(objectness.exp(), max=INF)) normalized_cls_score = normalized_cls_score.view(N, -1, H, W) return normalized_cls_score, bbox_reg @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (batch, num_anchors * num_classes, h, w) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (batch, num_anchors * 4, h, w) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Default: None Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_scores) == 1 assert self.prior_generator.num_levels == 1 device = cls_scores[0].device featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) # The output level is always 1 anchor_list = [anchors[0] for anchors in anchor_list] valid_flag_list = [valid_flags[0] for valid_flags in valid_flag_list] cls_scores_list = levels_to_images(cls_scores) bbox_preds_list = levels_to_images(bbox_preds) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( cls_scores_list, bbox_preds_list, anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (batch_labels, batch_label_weights, num_total_pos, num_total_neg, batch_bbox_weights, batch_pos_predicted_boxes, batch_target_boxes) = cls_reg_targets flatten_labels = batch_labels.reshape(-1) batch_label_weights = batch_label_weights.reshape(-1) cls_score = cls_scores[0].permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) num_total_samples = (num_total_pos + num_total_neg) if self.sampling else num_total_pos num_total_samples = reduce_mean( cls_score.new_tensor(num_total_samples)).clamp_(1.0).item() # classification loss loss_cls = self.loss_cls( cls_score, flatten_labels, batch_label_weights, avg_factor=num_total_samples) # regression loss if batch_pos_predicted_boxes.shape[0] == 0: # no pos sample loss_bbox = batch_pos_predicted_boxes.sum() * 0 else: loss_bbox = self.loss_bbox( batch_pos_predicted_boxes, batch_target_boxes, batch_bbox_weights.float(), avg_factor=num_total_samples) return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) def get_targets(self, cls_scores_list, bbox_preds_list, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in multiple images. Args: cls_scores_list (list[Tensor]): Classification scores of each image. each is a 4D-tensor, the shape is (h * w, num_anchors * num_classes). bbox_preds_list (list[Tensor]): Bbox preds of each image. each is a 4D-tensor, the shape is (h * w, num_anchors * 4). anchor_list (list[Tensor]): Anchors of each image. Each element of is a tensor of shape (h * w * num_anchors, 4). 
valid_flag_list (list[Tensor]): Valid flags of each image. Each element of is a tensor of shape (h * w * num_anchors, ) gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be ignored. gt_labels_list (list[Tensor]): Ground truth labels of each box. label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: Usually returns a tuple containing learning targets. - batch_labels (Tensor): Label of all images. Each element \ of is a tensor of shape (batch, h * w * num_anchors) - batch_label_weights (Tensor): Label weights of all images \ of is a tensor of shape (batch, h * w * num_anchors) - num_total_pos (int): Number of positive samples in all \ images. - num_total_neg (int): Number of negative samples in all \ images. additional_returns: This function enables user-defined returns from `self._get_targets_single`. These returns are currently refined to properties at each feature map (i.e. having HxW dimension). The results will be concatenated after the end """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] results = multi_apply( self._get_targets_single, bbox_preds_list, anchor_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) (all_labels, all_label_weights, pos_inds_list, neg_inds_list, sampling_results_list) = results[:5] rest_results = list(results[5:]) # user-added return values # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) batch_labels = torch.stack(all_labels, 0) batch_label_weights = torch.stack(all_label_weights, 0) res = (batch_labels, batch_label_weights, num_total_pos, num_total_neg) for i, rests in enumerate(rest_results): # user-added return values rest_results[i] = torch.cat(rests, 0) return res + tuple(rest_results) def _get_targets_single(self, bbox_preds, flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. Args: bbox_preds (Tensor): Bbox prediction of the image, which shape is (h * w ,4) flat_anchors (Tensor): Anchors of the image, which shape is (h * w * num_anchors ,4) valid_flags (Tensor): Valid flags of the image, which shape is (h * w * num_anchors,). gt_bboxes (Tensor): Ground truth bboxes of the image, shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). img_meta (dict): Meta info of the image. gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). label_channels (int): Channel of label. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: labels (Tensor): Labels of image, which shape is (h * w * num_anchors, ). label_weights (Tensor): Label weights of image, which shape is (h * w * num_anchors, ). pos_inds (Tensor): Pos index of image. neg_inds (Tensor): Neg index of image. 
sampling_result (obj:`SamplingResult`): Sampling result. pos_bbox_weights (Tensor): The Weight of using to calculate the bbox branch loss, which shape is (num, ). pos_predicted_boxes (Tensor): boxes predicted value of using to calculate the bbox branch loss, which shape is (num, 4). pos_target_boxes (Tensor): boxes target value of using to calculate the bbox branch loss, which shape is (num, 4). """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 8 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] bbox_preds = bbox_preds.reshape(-1, 4) bbox_preds = bbox_preds[inside_flags, :] # decoded bbox decoder_bbox_preds = self.bbox_coder.decode(anchors, bbox_preds) assign_result = self.assigner.assign( decoder_bbox_preds, anchors, gt_bboxes, gt_bboxes_ignore, None if self.sampling else gt_labels) pos_bbox_weights = assign_result.get_extra_property('pos_idx') pos_predicted_boxes = assign_result.get_extra_property( 'pos_predicted_boxes') pos_target_boxes = assign_result.get_extra_property('target_boxes') sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class since v2.5.0 labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) # fill bg label label_weights = unmap(label_weights, num_total_anchors, inside_flags) return (labels, label_weights, pos_inds, neg_inds, sampling_result, pos_bbox_weights, pos_predicted_boxes, pos_target_boxes)
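# --- illustrative appendix (not part of the original file) -------------------
# Quick shape check for the module-level helper levels_to_images() defined at
# the top of this file. This assumes mmdet is importable from this repo layout;
# the toy tensors and sizes below are ours.
import torch

from mmdet.models.dense_heads.yolof_head import levels_to_images

# two feature levels for a batch of 2 images with C=8 channels
lvl0 = torch.randn(2, 8, 4, 4)  # (N, C, H, W)
lvl1 = torch.randn(2, 8, 2, 2)
per_image = levels_to_images([lvl0, lvl1])
assert len(per_image) == 2                       # one tensor per image
assert per_image[0].shape == (4 * 4 + 2 * 2, 8)  # all levels concatenated as (num_elements, C)
# -----------------------------------------------------------------------------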
17409
40.7506
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/sabl_retina_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import force_fp32 from mmdet.core import (build_assigner, build_bbox_coder, build_prior_generator, build_sampler, images_to_levels, multi_apply, unmap) from mmdet.core.utils import filter_scores_and_topk from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin from .guided_anchor_head import GuidedAnchorHead @HEADS.register_module() class SABLRetinaHead(BaseDenseHead, BBoxTestMixin): """Side-Aware Boundary Localization (SABL) for RetinaNet. The anchor generation, assigning and sampling in SABLRetinaHead are the same as GuidedAnchorHead for guided anchoring. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of Convs for classification \ and regression branches. Defaults to 4. feat_channels (int): Number of hidden channels. \ Defaults to 256. approx_anchor_generator (dict): Config dict for approx generator. square_anchor_generator (dict): Config dict for square generator. conv_cfg (dict): Config dict for ConvModule. Defaults to None. norm_cfg (dict): Config dict for Norm Layer. Defaults to None. bbox_coder (dict): Config dict for bbox coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. train_cfg (dict): Training config of SABLRetinaHead. test_cfg (dict): Testing config of SABLRetinaHead. loss_cls (dict): Config of classification loss. loss_bbox_cls (dict): Config of classification loss for bbox branch. loss_bbox_reg (dict): Config of regression loss for bbox branch. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, num_classes, in_channels, stacked_convs=4, feat_channels=256, approx_anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), conv_cfg=None, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), reg_decoded_bbox=False, train_cfg=None, test_cfg=None, loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01))): super(SABLRetinaHead, self).__init__(init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.num_buckets = bbox_coder['num_buckets'] self.side_num = int(np.ceil(self.num_buckets / 2)) assert (approx_anchor_generator['octave_base_scale'] == square_anchor_generator['scales'][0]) assert (approx_anchor_generator['strides'] == square_anchor_generator['strides']) self.approx_anchor_generator = build_prior_generator( approx_anchor_generator) self.square_anchor_generator = build_prior_generator( square_anchor_generator) self.approxs_per_octave = ( self.approx_anchor_generator.num_base_priors[0]) # one anchor per location self.num_base_priors = self.square_anchor_generator.num_base_priors[0] self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.sampling = loss_cls['type'] not in [ 'FocalLoss', 'GHMC', 'QualityFocalLoss' ] if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox_cls = build_loss(loss_bbox_cls) self.loss_bbox_reg = build_loss(loss_bbox_reg) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False self._init_layers() @property def num_anchors(self): warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'please use "num_base_priors" instead') return self.square_anchor_generator.num_base_priors[0] def _init_layers(self): self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.retina_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.retina_bbox_reg = nn.Conv2d( self.feat_channels, self.side_num * 4, 3, padding=1) self.retina_bbox_cls = nn.Conv2d( self.feat_channels, self.side_num * 4, 3, padding=1) def 
forward_single(self, x): cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_cls_pred = self.retina_bbox_cls(reg_feat) bbox_reg_pred = self.retina_bbox_reg(reg_feat) bbox_pred = (bbox_cls_pred, bbox_reg_pred) return cls_score, bbox_pred def forward(self, feats): return multi_apply(self.forward_single, feats) def get_anchors(self, featmap_sizes, img_metas, device='cuda'): """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): device for returned tensors Returns: tuple: square approxs of each image """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # squares for one time multi_level_squares = self.square_anchor_generator.grid_priors( featmap_sizes, device=device) squares_list = [multi_level_squares for _ in range(num_imgs)] return squares_list def get_target(self, approx_list, inside_flag_list, square_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=None, sampling=True, unmap_outputs=True): """Compute bucketing targets. Args: approx_list (list[list]): Multi level approxs of each image. inside_flag_list (list[list]): Multi level inside flags of each image. square_list (list[list]): Multi level squares of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. gt_bboxes_list (list[Tensor]): Gt bboxes of each image. label_channels (int): Channel of label. sampling (bool): Sample Anchors or not. unmap_outputs (bool): unmap outputs or not. Returns: tuple: Returns a tuple containing learning targets. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each \ level. - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \ each level. - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \ each level. - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \ each level. - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \ each level. - num_total_pos (int): Number of positive samples in all \ images. - num_total_neg (int): Number of negative samples in all \ images. 
""" num_imgs = len(img_metas) assert len(approx_list) == len(inside_flag_list) == len( square_list) == num_imgs # anchor number of multi levels num_level_squares = [squares.size(0) for squares in square_list[0]] # concat all level anchors and flags to a single tensor inside_flag_flat_list = [] approx_flat_list = [] square_flat_list = [] for i in range(num_imgs): assert len(square_list[i]) == len(inside_flag_list[i]) inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) approx_flat_list.append(torch.cat(approx_list[i])) square_flat_list.append(torch.cat(square_list[i])) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_cls_targets, all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, approx_flat_list, inside_flag_flat_list, square_flat_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_squares) label_weights_list = images_to_levels(all_label_weights, num_level_squares) bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets, num_level_squares) bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights, num_level_squares) bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets, num_level_squares) bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights, num_level_squares) return (labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, num_total_pos, num_total_neg) def _get_target_single(self, flat_approxs, inside_flags, flat_squares, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=None, sampling=True, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_bboxes (Tensor): Ground truth bboxes of a single image, \ shape (num_gts, 4). gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored, shape (num_ignored_gts, 4). gt_labels (Tensor): Ground truth labels of each box, shape (num_gts,). img_meta (dict): Meta info of the image. label_channels (int): Channel of label. sampling (bool): Sample Anchors or not. unmap_outputs (bool): unmap outputs or not. 
Returns: tuple: - labels_list (Tensor): Labels in a single image - label_weights (Tensor): Label weights in a single image - bbox_cls_targets (Tensor): BBox cls targets in a single image - bbox_cls_weights (Tensor): BBox cls weights in a single image - bbox_reg_targets (Tensor): BBox reg targets in a single image - bbox_reg_weights (Tensor): BBox reg weights in a single image - num_total_pos (int): Number of positive samples \ in a single image - num_total_neg (int): Number of negative samples \ in a single image """ if not inside_flags.any(): return (None, ) * 8 # assign gt and sample anchors expand_inside_flags = inside_flags[:, None].expand( -1, self.approxs_per_octave).reshape(-1) approxs = flat_approxs[expand_inside_flags, :] squares = flat_squares[inside_flags, :] assign_result = self.assigner.assign(approxs, squares, self.approxs_per_octave, gt_bboxes, gt_bboxes_ignore) sampling_result = self.sampler.sample(assign_result, squares, gt_bboxes) num_valid_squares = squares.shape[0] bbox_cls_targets = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_cls_weights = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_reg_targets = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_reg_weights = squares.new_zeros( (num_valid_squares, self.side_num * 4)) labels = squares.new_full((num_valid_squares, ), self.num_classes, dtype=torch.long) label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets, pos_bbox_cls_weights) = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_squares.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors, inside_flags) bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors, inside_flags) bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors, inside_flags) bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds) def loss_single(self, cls_score, bbox_pred, labels, label_weights, bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, bbox_reg_weights, num_total_samples): # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) # regression loss bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4) bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4) 
bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4) bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4) (bbox_cls_pred, bbox_reg_pred) = bbox_pred bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape( -1, self.side_num * 4) bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape( -1, self.side_num * 4) loss_bbox_cls = self.loss_bbox_cls( bbox_cls_pred, bbox_cls_targets.long(), bbox_cls_weights, avg_factor=num_total_samples * 4 * self.side_num) loss_bbox_reg = self.loss_bbox_reg( bbox_reg_pred, bbox_reg_targets, bbox_reg_weights, avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk) return loss_cls, loss_bbox_cls, loss_bbox_reg @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.approx_anchor_generator.num_levels device = cls_scores[0].device # get sampled approxes approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs( self, featmap_sizes, img_metas, device=device) square_list = self.get_anchors(featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_target( approxs_list, inside_flag_list, square_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, sampling=self.sampling) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply( self.loss_single, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, num_total_samples=num_total_samples) return dict( loss_cls=losses_cls, loss_bbox_cls=losses_bbox_cls, loss_bbox_reg=losses_bbox_reg) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg=None, rescale=False): assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device mlvl_anchors = self.get_anchors( featmap_sizes, img_metas, device=device) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_cls_pred_list = [ bbox_preds[i][0][img_id].detach() for i in range(num_levels) ] bbox_reg_pred_list = [ bbox_preds[i][1][img_id].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single( cls_score_list, bbox_cls_pred_list, bbox_reg_pred_list, mlvl_anchors[img_id], img_shape, scale_factor, cfg, rescale) result_list.append(proposals) return result_list def _get_bboxes_single(self, cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): cfg = self.test_cfg if cfg is None else cfg nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_confids = [] mlvl_labels = [] assert len(cls_scores) == len(bbox_cls_preds) == len( bbox_reg_preds) == len(mlvl_anchors) for cls_score, bbox_cls_pred, bbox_reg_pred, 
anchors in zip( cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors): assert cls_score.size()[-2:] == bbox_cls_pred.size( )[-2:] == bbox_reg_pred.size()[-2::] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1)[:, :-1] bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape( -1, self.side_num * 4) bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape( -1, self.side_num * 4) # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict( anchors=anchors, bbox_cls_pred=bbox_cls_pred, bbox_reg_pred=bbox_reg_pred)) scores, labels, _, filtered_results = results anchors = filtered_results['anchors'] bbox_cls_pred = filtered_results['bbox_cls_pred'] bbox_reg_pred = filtered_results['bbox_reg_pred'] bbox_preds = [ bbox_cls_pred.contiguous(), bbox_reg_pred.contiguous() ] bboxes, confids = self.bbox_coder.decode( anchors.contiguous(), bbox_preds, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_confids.append(confids) mlvl_labels.append(labels) return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, scale_factor, cfg, rescale, True, mlvl_confids)
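# --- illustrative appendix (not part of the original file) -------------------
# Channel bookkeeping sketch for the SABL branches above: with the default
# BucketingBBoxCoder (num_buckets=14), each box side is split into
# side_num = ceil(num_buckets / 2) buckets, so retina_bbox_cls and
# retina_bbox_reg both output side_num * 4 channels per anchor. Plain Python;
# the variable names below are ours.
import math

num_buckets = 14
side_num = int(math.ceil(num_buckets / 2))  # 7
bbox_cls_channels = side_num * 4            # 28 outputs of retina_bbox_cls
bbox_reg_channels = side_num * 4            # 28 outputs of retina_bbox_reg
print(side_num, bbox_cls_channels, bbox_reg_channels)
# -----------------------------------------------------------------------------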
27410
42.440571
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/fovea_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import DeformConv2d from mmcv.runner import BaseModule from mmdet.core import multi_apply from mmdet.core.utils import filter_scores_and_topk from ..builder import HEADS from .anchor_free_head import AnchorFreeHead INF = 1e8 class FeatureAlign(BaseModule): def __init__(self, in_channels, out_channels, kernel_size=3, deform_groups=4, init_cfg=dict( type='Normal', layer='Conv2d', std=0.1, override=dict( type='Normal', name='conv_adaption', std=0.01))): super(FeatureAlign, self).__init__(init_cfg) offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 4, deform_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv2d( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deform_groups=deform_groups) self.relu = nn.ReLU(inplace=True) def forward(self, x, shape): offset = self.conv_offset(shape) x = self.relu(self.conv_adaption(x, offset)) return x @HEADS.register_module() class FoveaHead(AnchorFreeHead): """FoveaBox: Beyond Anchor-based Object Detector https://arxiv.org/abs/1904.03797 """ def __init__(self, num_classes, in_channels, base_edge_list=(16, 32, 64, 128, 256), scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), sigma=0.4, with_deform=False, deform_groups=4, init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)), **kwargs): self.base_edge_list = base_edge_list self.scale_ranges = scale_ranges self.sigma = sigma self.with_deform = with_deform self.deform_groups = deform_groups super().__init__(num_classes, in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self): # box branch super()._init_reg_convs() self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) # cls branch if not self.with_deform: super()._init_cls_convs() self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) else: self.cls_convs = nn.ModuleList() self.cls_convs.append( ConvModule( self.feat_channels, (self.feat_channels * 4), 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.cls_convs.append( ConvModule((self.feat_channels * 4), (self.feat_channels * 4), 1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.feature_adaption = FeatureAlign( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.conv_cls = nn.Conv2d( int(self.feat_channels * 4), self.cls_out_channels, 3, padding=1) def forward_single(self, x): cls_feat = x reg_feat = x for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.conv_reg(reg_feat) if self.with_deform: cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp()) for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.conv_cls(cls_feat) return cls_score, bbox_pred def loss(self, cls_scores, bbox_preds, gt_bbox_list, gt_label_list, img_metas, gt_bboxes_ignore=None): assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) num_imgs = cls_scores[0].size(0) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ 
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_labels, flatten_bbox_targets = self.get_targets( gt_bbox_list, gt_label_list, featmap_sizes, points) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((flatten_labels >= 0) & (flatten_labels < self.num_classes)).nonzero().view(-1) num_pos = len(pos_inds) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) if num_pos > 0: pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_weights = pos_bbox_targets.new_zeros( pos_bbox_targets.size()) + 1.0 loss_bbox = self.loss_bbox( pos_bbox_preds, pos_bbox_targets, pos_weights, avg_factor=num_pos) else: loss_bbox = torch.tensor( 0, dtype=flatten_bbox_preds.dtype, device=flatten_bbox_preds.device) return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, points): label_list, bbox_target_list = multi_apply( self._get_target_single, gt_bbox_list, gt_label_list, featmap_size_list=featmap_sizes, point_list=points) flatten_labels = [ torch.cat([ labels_level_img.flatten() for labels_level_img in labels_level ]) for labels_level in zip(*label_list) ] flatten_bbox_targets = [ torch.cat([ bbox_targets_level_img.reshape(-1, 4) for bbox_targets_level_img in bbox_targets_level ]) for bbox_targets_level in zip(*bbox_target_list) ] flatten_labels = torch.cat(flatten_labels) flatten_bbox_targets = torch.cat(flatten_bbox_targets) return flatten_labels, flatten_bbox_targets def _get_target_single(self, gt_bboxes_raw, gt_labels_raw, featmap_size_list=None, point_list=None): gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1])) label_list = [] bbox_target_list = [] # for each pyramid, find the cls and box target for base_len, (lower_bound, upper_bound), stride, featmap_size, \ points in zip(self.base_edge_list, self.scale_ranges, self.strides, featmap_size_list, point_list): # FG cat_id: [0, num_classes -1], BG cat_id: num_classes points = points.view(*featmap_size, 2) x, y = points[..., 0], points[..., 1] labels = gt_labels_raw.new_zeros(featmap_size) + self.num_classes bbox_targets = gt_bboxes_raw.new(featmap_size[0], featmap_size[1], 4) + 1 # scale assignment hit_indices = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(hit_indices) == 0: label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) continue _, hit_index_order = torch.sort(-gt_areas[hit_indices]) hit_indices = hit_indices[hit_index_order] gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride gt_labels = gt_labels_raw[hit_indices] half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0]) half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1]) # valid fovea area: left, right, top, down pos_left = torch.ceil( gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long(). \ clamp(0, featmap_size[1] - 1) pos_right = torch.floor( gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long(). \ clamp(0, featmap_size[1] - 1) pos_top = torch.ceil( gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long(). \ clamp(0, featmap_size[0] - 1) pos_down = torch.floor( gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long(). 
\ clamp(0, featmap_size[0] - 1) for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \ zip(pos_left, pos_top, pos_right, pos_down, gt_labels, gt_bboxes_raw[hit_indices, :]): labels[py1:py2 + 1, px1:px2 + 1] = label bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \ (x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \ (y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \ (gt_x2 - x[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \ (gt_y2 - y[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.) label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) return label_list, bbox_target_list # Same as base_dense_head/_get_bboxes_single except self._bbox_decode def _get_bboxes_single(self, cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors, img_meta, cfg, rescale=False, with_nms=True, **kwargs): """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. Fovea head does not need this value. mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 2). img_meta (dict): Image meta info. cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_score_list) == len(bbox_pred_list) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, stride, base_len, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, self.strides, self.base_edge_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. 
results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self._bbox_decode(priors, bbox_pred, base_len, img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, img_meta['scale_factor'], cfg, rescale, with_nms) def _bbox_decode(self, priors, bbox_pred, base_len, max_shape): bbox_pred = bbox_pred.exp() y = priors[:, 1] x = priors[:, 0] x1 = (x - base_len * bbox_pred[:, 0]). \ clamp(min=0, max=max_shape[1] - 1) y1 = (y - base_len * bbox_pred[:, 1]). \ clamp(min=0, max=max_shape[0] - 1) x2 = (x + base_len * bbox_pred[:, 2]). \ clamp(min=0, max=max_shape[1] - 1) y2 = (y + base_len * bbox_pred[:, 3]). \ clamp(min=0, max=max_shape[0] - 1) decoded_bboxes = torch.stack([x1, y1, x2, y2], -1) return decoded_bboxes def _get_points_single(self, *args, **kwargs): """Get points according to feature map size. This function will be deprecated soon. """ warnings.warn( '`_get_points_single` in `FoveaHead` will be ' 'deprecated soon, we support a multi level point generator now' 'you can get points of a single level feature map ' 'with `self.prior_generator.single_level_grid_priors` ') y, x = super()._get_points_single(*args, **kwargs) return y + 0.5, x + 0.5
16,364
41.396373
79
py
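A minimal illustration (not part of the repository) of the decoding step implemented by FoveaHead._bbox_decode in the file above: predictions are exponentiated, scaled by the per-level base edge length, and clamped to the image shape. The helper name decode_fovea_bboxes and all numbers are invented for this sketch; only torch is required.

import torch

def decode_fovea_bboxes(priors, bbox_pred, base_len, max_shape):
    # priors: (N, 2) grid-cell centres; bbox_pred: (N, 4) log-space distances
    bbox_pred = bbox_pred.exp()
    x, y = priors[:, 0], priors[:, 1]
    x1 = (x - base_len * bbox_pred[:, 0]).clamp(min=0, max=max_shape[1] - 1)
    y1 = (y - base_len * bbox_pred[:, 1]).clamp(min=0, max=max_shape[0] - 1)
    x2 = (x + base_len * bbox_pred[:, 2]).clamp(min=0, max=max_shape[1] - 1)
    y2 = (y + base_len * bbox_pred[:, 3]).clamp(min=0, max=max_shape[0] - 1)
    return torch.stack([x1, y1, x2, y2], -1)

priors = torch.tensor([[32.5, 48.5]])          # one prior point
bbox_pred = torch.zeros(1, 4)                  # exp(0) == 1, so each distance == base_len
print(decode_fovea_bboxes(priors, bbox_pred, base_len=16, max_shape=(128, 128)))
# tensor([[16.5000, 32.5000, 48.5000, 64.5000]])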
DSLA-DSLA
DSLA-DSLA/mmdet/models/dense_heads/dense_test_mixins.py
# Copyright (c) OpenMMLab. All rights reserved. import sys from inspect import signature import torch from mmcv.ops import batched_nms from mmdet.core import bbox_mapping_back, merge_aug_proposals if sys.version_info >= (3, 7): from mmdet.utils.contextmanagers import completed class BBoxTestMixin(object): """Mixin class for testing det bboxes via DenseHead.""" def simple_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes without test-time augmentation, can be applied in DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,) """ outs = self.forward(feats) results_list = self.get_bboxes( *outs, img_metas=img_metas, rescale=rescale) return results_list def aug_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes with test time augmentation, can be applied in DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,). The length of list should always be 1. 
""" # check with_nms argument gb_sig = signature(self.get_bboxes) gb_args = [p.name for p in gb_sig.parameters.values()] gbs_sig = signature(self._get_bboxes_single) gbs_args = [p.name for p in gbs_sig.parameters.values()] assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ f'{self.__class__.__name__}' \ ' does not support test-time augmentation' aug_bboxes = [] aug_scores = [] aug_labels = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch outs = self.forward(x) bbox_outputs = self.get_bboxes( *outs, img_metas=img_meta, cfg=self.test_cfg, rescale=False, with_nms=False)[0] aug_bboxes.append(bbox_outputs[0]) aug_scores.append(bbox_outputs[1]) if len(bbox_outputs) >= 3: aug_labels.append(bbox_outputs[2]) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = self.merge_aug_bboxes( aug_bboxes, aug_scores, img_metas) merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None if merged_bboxes.numel() == 0: det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1) return [ (det_bboxes, merged_labels), ] det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, merged_labels, self.test_cfg.nms) det_bboxes = det_bboxes[:self.test_cfg.max_per_img] det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img] if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) return [ (_det_bboxes, det_labels), ] def simple_test_rpn(self, x, img_metas): """Test without augmentation, only for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Proposals of each image, each item has shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). """ rpn_outs = self(x) proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas) return proposal_list def aug_test_rpn(self, feats, img_metas): """Test with augmentation for only for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Proposals of each image, each item has shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). 
""" samples_per_gpu = len(img_metas[0]) aug_proposals = [[] for _ in range(samples_per_gpu)] for x, img_meta in zip(feats, img_metas): proposal_list = self.simple_test_rpn(x, img_meta) for i, proposals in enumerate(proposal_list): aug_proposals[i].append(proposals) # reorganize the order of 'img_metas' to match the dimensions # of 'aug_proposals' aug_img_metas = [] for i in range(samples_per_gpu): aug_img_meta = [] for j in range(len(img_metas)): aug_img_meta.append(img_metas[j][i]) aug_img_metas.append(aug_img_meta) # after merging, proposals will be rescaled to the original image size merged_proposals = [ merge_aug_proposals(proposals, aug_img_meta, self.test_cfg) for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas) ] return merged_proposals if sys.version_info >= (3, 7): async def async_simple_test_rpn(self, x, img_metas): sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025) async with completed( __name__, 'rpn_head_forward', sleep_interval=sleep_interval): rpn_outs = self(x) proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas) return proposal_list def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas): """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_shapes (list[Tensor]): shape (3, ). Returns: tuple[Tensor]: ``bboxes`` with shape (n,4), where 4 represent (tl_x, tl_y, br_x, br_y) and ``scores`` with shape (n,). """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] flip_direction = img_info[0]['flip_direction'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.cat(recovered_bboxes, dim=0) if aug_scores is None: return bboxes else: scores = torch.cat(aug_scores, dim=0) return bboxes, scores
8,421
39.68599
79
py
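A small, self-contained sketch of the img_metas reorganisation performed in aug_test_rpn above: metas arrive grouped by augmentation while proposals are grouped by image, so the nesting is transposed before merging. The placeholder dicts are hypothetical.

# two augmentations x two images; the dicts are hypothetical placeholders
img_metas = [
    [{'img': 'img0_aug0'}, {'img': 'img1_aug0'}],   # augmentation 0
    [{'img': 'img0_aug1'}, {'img': 'img1_aug1'}],   # augmentation 1
]
samples_per_gpu = len(img_metas[0])
aug_img_metas = [[img_metas[j][i] for j in range(len(img_metas))]
                 for i in range(samples_per_gpu)]
print(aug_img_metas[0])   # all augmented metas belonging to image 0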
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/csp_layer.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import BaseModule class DarknetBottleneck(BaseModule): """The basic bottleneck block used in Darknet. Each ResBlock consists of two ConvModules and the input is added to the final output. Each ConvModule is composed of Conv, BN, and LeakyReLU. The first convLayer has filter size of 1x1 and the second one has the filter size of 3x3. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. expansion (int): The kernel size of the convolution. Default: 0.5 add_identity (bool): Whether to add identity to the out. Default: True use_depthwise (bool): Whether to use depthwise separable convolution. Default: False conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='Swish'). """ def __init__(self, in_channels, out_channels, expansion=0.5, add_identity=True, use_depthwise=False, conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), init_cfg=None): super().__init__(init_cfg) hidden_channels = int(out_channels * expansion) conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule self.conv1 = ConvModule( in_channels, hidden_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.conv2 = conv( hidden_channels, out_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.add_identity = \ add_identity and in_channels == out_channels def forward(self, x): identity = x out = self.conv1(x) out = self.conv2(out) if self.add_identity: return out + identity else: return out class CSPLayer(BaseModule): """Cross Stage Partial Layer. Args: in_channels (int): The input channels of the CSP layer. out_channels (int): The output channels of the CSP layer. expand_ratio (float): Ratio to adjust the number of channels of the hidden layer. Default: 0.5 num_blocks (int): Number of blocks. Default: 1 add_identity (bool): Whether to add identity in blocks. Default: True use_depthwise (bool): Whether to depthwise separable convolution in blocks. Default: False conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN') act_cfg (dict): Config dict for activation layer. 
Default: dict(type='Swish') """ def __init__(self, in_channels, out_channels, expand_ratio=0.5, num_blocks=1, add_identity=True, use_depthwise=False, conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), init_cfg=None): super().__init__(init_cfg) mid_channels = int(out_channels * expand_ratio) self.main_conv = ConvModule( in_channels, mid_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.short_conv = ConvModule( in_channels, mid_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.final_conv = ConvModule( 2 * mid_channels, out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.blocks = nn.Sequential(*[ DarknetBottleneck( mid_channels, mid_channels, 1.0, add_identity, use_depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) for _ in range(num_blocks) ]) def forward(self, x): x_short = self.short_conv(x) x_main = self.main_conv(x) x_main = self.blocks(x_main) x_final = torch.cat((x_main, x_short), dim=1) return self.final_conv(x_final)
5,079
32.642384
77
py
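A rough usage sketch for the CSPLayer defined above, assuming mmcv and this repository are importable; it only checks that a random tensor flows through and that the channel count matches out_channels.

import torch
from mmdet.models.utils.csp_layer import CSPLayer

layer = CSPLayer(in_channels=64, out_channels=128, expand_ratio=0.5, num_blocks=2)
x = torch.randn(1, 64, 32, 32)
print(layer(x).shape)   # torch.Size([1, 128, 32, 32]); spatial size is preserved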
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/se_layer.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule class SELayer(BaseModule): """Squeeze-and-Excitation Module. Args: channels (int): The input (and output) channels of the SE layer. ratio (int): Squeeze ratio in SELayer, the intermediate channel will be ``int(channels/ratio)``. Default: 16. conv_cfg (None or dict): Config dict for convolution layer. Default: None, which means using conv2d. act_cfg (dict or Sequence[dict]): Config dict for activation layer. If act_cfg is a dict, two activation layers will be configurated by this dict. If act_cfg is a sequence of dicts, the first activation layer will be configurated by the first dict and the second activation layer will be configurated by the second dict. Default: (dict(type='ReLU'), dict(type='Sigmoid')) init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, channels, ratio=16, conv_cfg=None, act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), init_cfg=None): super(SELayer, self).__init__(init_cfg) if isinstance(act_cfg, dict): act_cfg = (act_cfg, act_cfg) assert len(act_cfg) == 2 assert mmcv.is_tuple_of(act_cfg, dict) self.global_avgpool = nn.AdaptiveAvgPool2d(1) self.conv1 = ConvModule( in_channels=channels, out_channels=int(channels / ratio), kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[0]) self.conv2 = ConvModule( in_channels=int(channels / ratio), out_channels=channels, kernel_size=1, stride=1, conv_cfg=conv_cfg, act_cfg=act_cfg[1]) def forward(self, x): out = self.global_avgpool(x) out = self.conv1(out) out = self.conv2(out) return x * out
2,175
35.881356
79
py
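A brief sketch of exercising the SELayer above (assuming mmcv is installed); the module reweights channels and preserves the input shape.

import torch
from mmdet.models.utils.se_layer import SELayer

se = SELayer(channels=64, ratio=16)     # squeezes to 64 // 16 = 4 channels internally
x = torch.randn(2, 64, 16, 16)
print(se(x).shape)                      # torch.Size([2, 64, 16, 16])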
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/gaussian_target.py
# Copyright (c) OpenMMLab. All rights reserved. from math import sqrt import torch import torch.nn.functional as F def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'): """Generate 2D gaussian kernel. Args: radius (int): Radius of gaussian kernel. sigma (int): Sigma of gaussian function. Default: 1. dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32. device (str): Device of gaussian tensor. Default: 'cpu'. Returns: h (Tensor): Gaussian kernel with a ``(2 * radius + 1) * (2 * radius + 1)`` shape. """ x = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(1, -1) y = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(-1, 1) h = (-(x * x + y * y) / (2 * sigma * sigma)).exp() h[h < torch.finfo(h.dtype).eps * h.max()] = 0 return h def gen_gaussian_target(heatmap, center, radius, k=1): """Generate 2D gaussian heatmap. Args: heatmap (Tensor): Input heatmap, the gaussian kernel will cover on it and maintain the max value. center (list[int]): Coord of gaussian kernel's center. radius (int): Radius of gaussian kernel. k (int): Coefficient of gaussian kernel. Default: 1. Returns: out_heatmap (Tensor): Updated heatmap covered by gaussian kernel. """ diameter = 2 * radius + 1 gaussian_kernel = gaussian2D( radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device) x, y = center height, width = heatmap.shape[:2] left, right = min(x, radius), min(width - x, radius + 1) top, bottom = min(y, radius), min(height - y, radius + 1) masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian_kernel[radius - top:radius + bottom, radius - left:radius + right] out_heatmap = heatmap torch.max( masked_heatmap, masked_gaussian * k, out=out_heatmap[y - top:y + bottom, x - left:x + right]) return out_heatmap def gaussian_radius(det_size, min_overlap): r"""Generate 2D gaussian radius. This function is modified from the `official github repo <https://github.com/princeton-vl/CornerNet-Lite/blob/master/core/sample/ utils.py#L65>`_. Given ``min_overlap``, radius could computed by a quadratic equation according to Vieta's formulas. There are 3 cases for computing gaussian radius, details are following: - Explanation of figure: ``lt`` and ``br`` indicates the left-top and bottom-right corner of ground truth box. ``x`` indicates the generated corner at the limited position when ``radius=r``. - Case1: one corner is inside the gt box and the other is outside. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x----------+--+ | | | | | | | | height | | overlap | | | | | | | | | | v +--+---------br--+ - | | | +----------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\ {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case2: both two corners are inside the gt box. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x-------+ | | | | | | |overlap| | height | | | | | +-------x--+ | | | v +----------+-br - To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\ {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case3: both two corners are outside the gt box. .. 
code:: text |< width >| x--+----------------+ | | | +-lt-------------+ | - | | | | ^ | | | | | | overlap | | height | | | | | | | | v | +------------br--+ - | | | +----------------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\ {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\ {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a} Args: det_size (list[int]): Shape of object. min_overlap (float): Min IoU with ground truth for boxes generated by keypoints inside the gaussian kernel. Returns: radius (int): Radius of gaussian kernel. """ height, width = det_size a1 = 1 b1 = (height + width) c1 = width * height * (1 - min_overlap) / (1 + min_overlap) sq1 = sqrt(b1**2 - 4 * a1 * c1) r1 = (b1 - sq1) / (2 * a1) a2 = 4 b2 = 2 * (height + width) c2 = (1 - min_overlap) * width * height sq2 = sqrt(b2**2 - 4 * a2 * c2) r2 = (b2 - sq2) / (2 * a2) a3 = 4 * min_overlap b3 = -2 * min_overlap * (height + width) c3 = (min_overlap - 1) * width * height sq3 = sqrt(b3**2 - 4 * a3 * c3) r3 = (b3 + sq3) / (2 * a3) return min(r1, r2, r3) def get_local_maximum(heat, kernel=3): """Extract local maximum pixel with given kernel. Args: heat (Tensor): Target heatmap. kernel (int): Kernel size of max pooling. Default: 3. Returns: heat (Tensor): A heatmap where local maximum pixels maintain its own value and other positions are 0. """ pad = (kernel - 1) // 2 hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad) keep = (hmax == heat).float() return heat * keep def get_topk_from_heatmap(scores, k=20): """Get top k positions from heatmap. Args: scores (Tensor): Target heatmap with shape [batch, num_classes, height, width]. k (int): Target number. Default: 20. Returns: tuple[torch.Tensor]: Scores, indexes, categories and coords of topk keypoint. Containing following Tensors: - topk_scores (Tensor): Max scores of each topk keypoint. - topk_inds (Tensor): Indexes of each topk keypoint. - topk_clses (Tensor): Categories of each topk keypoint. - topk_ys (Tensor): Y-coord of each topk keypoint. - topk_xs (Tensor): X-coord of each topk keypoint. """ batch, _, height, width = scores.size() topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k) topk_clses = topk_inds // (height * width) topk_inds = topk_inds % (height * width) topk_ys = topk_inds // width topk_xs = (topk_inds % width).int().float() return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs def gather_feat(feat, ind, mask=None): """Gather feature according to index. Args: feat (Tensor): Target feature map. ind (Tensor): Target coord index. mask (Tensor | None): Mask of feature map. Default: None. Returns: feat (Tensor): Gathered feature. """ dim = feat.size(2) ind = ind.unsqueeze(2).repeat(1, 1, dim) feat = feat.gather(1, ind) if mask is not None: mask = mask.unsqueeze(2).expand_as(feat) feat = feat[mask] feat = feat.view(-1, dim) return feat def transpose_and_gather_feat(feat, ind): """Transpose and gather feature according to index. Args: feat (Tensor): Target feature map. ind (Tensor): Target coord index. Returns: feat (Tensor): Transposed and gathered feature. """ feat = feat.permute(0, 2, 3, 1).contiguous() feat = feat.view(feat.size(0), -1, feat.size(3)) feat = gather_feat(feat, ind) return feat
8,393
30.204461
79
py
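A short sketch (not from the repository) of how the helpers above are typically combined: pick a radius from the object size and a minimum IoU, then splat a Gaussian peak onto a heatmap at the object centre. The sizes below are arbitrary.

import torch
from mmdet.models.utils.gaussian_target import gaussian_radius, gen_gaussian_target

h, w = 24.0, 40.0                                    # object size in feature-map cells
radius = max(0, int(gaussian_radius((h, w), min_overlap=0.3)))
heatmap = torch.zeros(128, 128)
heatmap = gen_gaussian_target(heatmap, center=[64, 64], radius=radius)
print(radius, heatmap.max().item())                  # the peak at the centre equals 1.0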
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/normed_predictor.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import CONV_LAYERS from .builder import LINEAR_LAYERS @LINEAR_LAYERS.register_module(name='NormedLinear') class NormedLinear(nn.Linear): """Normalized Linear Layer. Args: tempeature (float, optional): Tempeature term. Default to 20. power (int, optional): Power term. Default to 1.0. eps (float, optional): The minimal value of divisor to keep numerical stability. Default to 1e-6. """ def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs): super(NormedLinear, self).__init__(*args, **kwargs) self.tempearture = tempearture self.power = power self.eps = eps self.init_weights() def init_weights(self): nn.init.normal_(self.weight, mean=0, std=0.01) if self.bias is not None: nn.init.constant_(self.bias, 0) def forward(self, x): weight_ = self.weight / ( self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps) x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) x_ = x_ * self.tempearture return F.linear(x_, weight_, self.bias) @CONV_LAYERS.register_module(name='NormedConv2d') class NormedConv2d(nn.Conv2d): """Normalized Conv2d Layer. Args: tempeature (float, optional): Tempeature term. Default to 20. power (int, optional): Power term. Default to 1.0. eps (float, optional): The minimal value of divisor to keep numerical stability. Default to 1e-6. norm_over_kernel (bool, optional): Normalize over kernel. Default to False. """ def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, norm_over_kernel=False, **kwargs): super(NormedConv2d, self).__init__(*args, **kwargs) self.tempearture = tempearture self.power = power self.norm_over_kernel = norm_over_kernel self.eps = eps def forward(self, x): if not self.norm_over_kernel: weight_ = self.weight / ( self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps) else: weight_ = self.weight / ( self.weight.view(self.weight.size(0), -1).norm( dim=1, keepdim=True).pow(self.power)[..., None, None] + self.eps) x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) x_ = x_ * self.tempearture if hasattr(self, 'conv2d_forward'): x_ = self.conv2d_forward(x_, weight_) else: if torch.__version__ >= '1.8': x_ = self._conv_forward(x_, weight_, self.bias) else: x_ = self._conv_forward(x_, weight_) return x_
2,998
32.696629
77
py
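A hedged usage sketch of NormedLinear above: both the weight rows and the input features are normalised before the linear projection, so the logits behave like temperature-scaled cosine similarities (plus the bias). The shapes are arbitrary.

import torch
from mmdet.models.utils.normed_predictor import NormedLinear

fc = NormedLinear(256, 80)      # e.g. an 80-way classification head
x = torch.randn(4, 256)
print(fc(x).shape)              # torch.Size([4, 80])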
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/ckpt_convert.py
# Copyright (c) OpenMMLab. All rights reserved. # This script consists of several convert functions which # can modify the weights of model in original repo to be # pre-trained weights. from collections import OrderedDict import torch def pvt_convert(ckpt): new_ckpt = OrderedDict() # Process the concat between q linear weights and kv linear weights use_abs_pos_embed = False use_conv_ffn = False for k in ckpt.keys(): if k.startswith('pos_embed'): use_abs_pos_embed = True if k.find('dwconv') >= 0: use_conv_ffn = True for k, v in ckpt.items(): if k.startswith('head'): continue if k.startswith('norm.'): continue if k.startswith('cls_token'): continue if k.startswith('pos_embed'): stage_i = int(k.replace('pos_embed', '')) new_k = k.replace(f'pos_embed{stage_i}', f'layers.{stage_i - 1}.1.0.pos_embed') if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7 new_v = v[:, 1:, :] # remove cls token else: new_v = v elif k.startswith('patch_embed'): stage_i = int(k.split('.')[0].replace('patch_embed', '')) new_k = k.replace(f'patch_embed{stage_i}', f'layers.{stage_i - 1}.0') new_v = v if 'proj.' in new_k: new_k = new_k.replace('proj.', 'projection.') elif k.startswith('block'): stage_i = int(k.split('.')[0].replace('block', '')) layer_i = int(k.split('.')[1]) new_layer_i = layer_i + use_abs_pos_embed new_k = k.replace(f'block{stage_i}.{layer_i}', f'layers.{stage_i - 1}.1.{new_layer_i}') new_v = v if 'attn.q.' in new_k: sub_item_k = k.replace('q.', 'kv.') new_k = new_k.replace('q.', 'attn.in_proj_') new_v = torch.cat([v, ckpt[sub_item_k]], dim=0) elif 'attn.kv.' in new_k: continue elif 'attn.proj.' in new_k: new_k = new_k.replace('proj.', 'attn.out_proj.') elif 'attn.sr.' in new_k: new_k = new_k.replace('sr.', 'sr.') elif 'mlp.' in new_k: string = f'{new_k}-' new_k = new_k.replace('mlp.', 'ffn.layers.') if 'fc1.weight' in new_k or 'fc2.weight' in new_k: new_v = v.reshape((*v.shape, 1, 1)) new_k = new_k.replace('fc1.', '0.') new_k = new_k.replace('dwconv.dwconv.', '1.') if use_conv_ffn: new_k = new_k.replace('fc2.', '4.') else: new_k = new_k.replace('fc2.', '3.') string += f'{new_k} {v.shape}-{new_v.shape}' elif k.startswith('norm'): stage_i = int(k[4]) new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2') new_v = v else: new_k = k new_v = v new_ckpt[new_k] = new_v return new_ckpt def swin_converter(ckpt): new_ckpt = OrderedDict() def correct_unfold_reduction_order(x): out_channel, in_channel = x.shape x = x.reshape(out_channel, 4, in_channel // 4) x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel) return x def correct_unfold_norm_order(x): in_channel = x.shape[0] x = x.reshape(4, in_channel // 4) x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) return x for k, v in ckpt.items(): if k.startswith('head'): continue elif k.startswith('layers'): new_v = v if 'attn.' in k: new_k = k.replace('attn.', 'attn.w_msa.') elif 'mlp.' in k: if 'mlp.fc1.' in k: new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') elif 'mlp.fc2.' in k: new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') else: new_k = k.replace('mlp.', 'ffn.') elif 'downsample' in k: new_k = k if 'reduction.' in k: new_v = correct_unfold_reduction_order(v) elif 'norm.' in k: new_v = correct_unfold_norm_order(v) else: new_k = k new_k = new_k.replace('layers', 'stages', 1) elif k.startswith('patch_embed'): new_v = v if 'proj' in k: new_k = k.replace('proj', 'projection') else: new_k = k else: new_v = v new_k = k new_ckpt['backbone.' + new_k] = new_v return new_ckpt
4,964
34.978261
79
py
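A toy sketch of what swin_converter above does, using two made-up keys: official Swin checkpoint names are remapped to the MMDetection backbone layout. It assumes the module path shown in the file name is importable.

from collections import OrderedDict

import torch
from mmdet.models.utils.ckpt_convert import swin_converter

ckpt = OrderedDict({
    'patch_embed.proj.weight': torch.randn(96, 3, 4, 4),
    'layers.0.blocks.0.mlp.fc1.weight': torch.randn(384, 96),
})
print(list(swin_converter(ckpt).keys()))
# ['backbone.patch_embed.projection.weight',
#  'backbone.stages.0.blocks.0.ffn.layers.0.0.weight']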
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/conv_upsample.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, ModuleList class ConvUpsample(BaseModule): """ConvUpsample performs 2x upsampling after Conv. There are several `ConvModule` layers. In the first few layers, upsampling will be applied after each layer of convolution. The number of upsampling must be no more than the number of ConvModule layers. Args: in_channels (int): Number of channels in the input feature map. inner_channels (int): Number of channels produced by the convolution. num_layers (int): Number of convolution layers. num_upsample (int | optional): Number of upsampling layer. Must be no more than num_layers. Upsampling will be applied after the first ``num_upsample`` layers of convolution. Default: ``num_layers``. conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: None. init_cfg (dict): Config dict for initialization. Default: None. kwargs (key word augments): Other augments used in ConvModule. """ def __init__(self, in_channels, inner_channels, num_layers=1, num_upsample=None, conv_cfg=None, norm_cfg=None, init_cfg=None, **kwargs): super(ConvUpsample, self).__init__(init_cfg) if num_upsample is None: num_upsample = num_layers assert num_upsample <= num_layers, \ f'num_upsample({num_upsample})must be no more than ' \ f'num_layers({num_layers})' self.num_layers = num_layers self.num_upsample = num_upsample self.conv = ModuleList() for i in range(num_layers): self.conv.append( ConvModule( in_channels, inner_channels, 3, padding=1, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) in_channels = inner_channels def forward(self, x): num_upsample = self.num_upsample for i in range(self.num_layers): x = self.conv[i](x) if num_upsample > 0: num_upsample -= 1 x = F.interpolate( x, scale_factor=2, mode='bilinear', align_corners=False) return x
2,653
38.029412
78
py
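A brief sketch of the ConvUpsample module above (assuming mmcv is available): three conv layers with 2x upsampling applied after the first two, so the spatial size grows by 4x overall.

import torch
from mmdet.models.utils.conv_upsample import ConvUpsample

up = ConvUpsample(in_channels=256, inner_channels=128, num_layers=3, num_upsample=2)
x = torch.randn(1, 256, 16, 16)
print(up(x).shape)   # torch.Size([1, 128, 64, 64]); 16 -> 32 -> 64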
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/misc.py
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn import functional as F


def interpolate_as(source, target, mode='bilinear', align_corners=False):
    """Interpolate the `source` to the shape of the `target`.

    The `source` must be a Tensor, but the `target` can be a Tensor or a
    np.ndarray with the shape (..., target_h, target_w).

    Args:
        source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or
            (N, C, H, W).
        target (Tensor | np.ndarray): The interpolation target with the shape
            (..., target_h, target_w).
        mode (str): Algorithm used for interpolation. The options are the
            same as those in F.interpolate(). Default: ``'bilinear'``.
        align_corners (bool): The same as the argument in F.interpolate().

    Returns:
        Tensor: The interpolated source Tensor.
    """
    assert len(target.shape) >= 2

    def _interpolate_as(source, target, mode='bilinear', align_corners=False):
        """Interpolate the `source` (4D) to the shape of the `target`."""
        target_h, target_w = target.shape[-2:]
        source_h, source_w = source.shape[-2:]
        if target_h != source_h or target_w != source_w:
            source = F.interpolate(
                source,
                size=(target_h, target_w),
                mode=mode,
                align_corners=align_corners)
        return source

    if len(source.shape) == 3:
        source = source[:, None, :, :]
        source = _interpolate_as(source, target, mode, align_corners)
        return source[:, 0, :, :]
    else:
        return _interpolate_as(source, target, mode, align_corners)
1,655
37.511628
78
py
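A minimal sketch of interpolate_as above: a 3D source tensor is resized to the spatial size of a target tensor; only torch is required besides the module itself.

import torch
from mmdet.models.utils.misc import interpolate_as

source = torch.rand(2, 32, 32)       # e.g. a batch of coarse masks
target = torch.rand(2, 128, 128)
print(interpolate_as(source, target).shape)   # torch.Size([2, 128, 128])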
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/res_layer.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn import build_conv_layer, build_norm_layer from mmcv.runner import BaseModule, Sequential from torch import nn as nn class ResLayer(Sequential): """ResLayer to build ResNet style backbone. Args: block (nn.Module): block used to build ResLayer. inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottleneck. Default: False conv_cfg (dict): dictionary to construct and config conv layer. Default: None norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='BN') downsample_first (bool): Downsample at the first block or last block. False for Hourglass, True for ResNet. Default: True """ def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), downsample_first=True, **kwargs): self.block = block downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = [] conv_stride = stride if avg_down: conv_stride = 1 downsample.append( nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)) downsample.extend([ build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1] ]) downsample = nn.Sequential(*downsample) layers = [] if downsample_first: layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) inplanes = planes * block.expansion for _ in range(1, num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) else: # downsample_first=False is for HourglassModule for _ in range(num_blocks - 1): layers.append( block( inplanes=inplanes, planes=inplanes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) super(ResLayer, self).__init__(*layers) class SimplifiedBasicBlock(BaseModule): """Simplified version of original basic residual block. This is used in `SCNet <https://arxiv.org/abs/2012.10150>`_. - Norm layer is now optional - Last ReLU in forward function is removed """ expansion = 1 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_fg=None): super(SimplifiedBasicBlock, self).__init__(init_fg) assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' assert not with_cp, 'Not implemented yet.' 
self.with_norm = norm_cfg is not None with_bias = True if norm_cfg is None else False self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=with_bias) if self.with_norm: self.norm1_name, norm1 = build_norm_layer( norm_cfg, planes, postfix=1) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( conv_cfg, planes, planes, 3, padding=1, bias=with_bias) if self.with_norm: self.norm2_name, norm2 = build_norm_layer( norm_cfg, planes, postfix=2) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.with_cp = with_cp @property def norm1(self): """nn.Module: normalization layer after the first convolution layer""" return getattr(self, self.norm1_name) if self.with_norm else None @property def norm2(self): """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) if self.with_norm else None def forward(self, x): """Forward function.""" identity = x out = self.conv1(x) if self.with_norm: out = self.norm1(out) out = self.relu(out) out = self.conv2(out) if self.with_norm: out = self.norm2(out) if self.downsample is not None: identity = self.downsample(x) out += identity return out
6,392
32.471204
79
py
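A usage sketch combining the two classes defined above: two SimplifiedBasicBlocks (whose expansion is 1) stacked into a ResLayer; shapes are preserved because the stride is 1 and no downsample branch is created.

import torch
from mmdet.models.utils.res_layer import ResLayer, SimplifiedBasicBlock

layer = ResLayer(SimplifiedBasicBlock, inplanes=64, planes=64, num_blocks=2)
x = torch.randn(1, 64, 56, 56)
print(layer(x).shape)   # torch.Size([1, 64, 56, 56])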
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/brick_wrappers.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version

if torch.__version__ == 'parrots':
    TORCH_VERSION = torch.__version__
else:
    # torch.__version__ could be 1.3.1+cu92, we only need the first two
    # for comparison
    TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])


def adaptive_avg_pool2d(input, output_size):
    """Handle empty batch dimension to adaptive_avg_pool2d.

    Args:
        input (tensor): 4D tensor.
        output_size (int, tuple[int,int]): the target output size.
    """
    if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
        if isinstance(output_size, int):
            output_size = [output_size, output_size]
        output_size = [*input.shape[:2], *output_size]
        empty = NewEmptyTensorOp.apply(input, output_size)
        return empty
    else:
        return F.adaptive_avg_pool2d(input, output_size)


class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
    """Handle empty batch dimension to AdaptiveAvgPool2d."""

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            output_size = self.output_size
            if isinstance(output_size, int):
                output_size = [output_size, output_size]
            else:
                output_size = [
                    v if v is not None else d
                    for v, d in zip(output_size, x.size()[-2:])
                ]
            output_size = [*x.shape[:2], *output_size]
            empty = NewEmptyTensorOp.apply(x, output_size)
            return empty

        return super().forward(x)
1,856
34.711538
77
py
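A small sketch of the empty-batch handling above: the wrapped AdaptiveAvgPool2d is expected to return the usual output shape even when the batch dimension is zero (the exact code path taken depends on the installed torch version).

import torch
from mmdet.models.utils.brick_wrappers import AdaptiveAvgPool2d

pool = AdaptiveAvgPool2d(output_size=7)
print(pool(torch.randn(2, 256, 14, 14)).shape)   # torch.Size([2, 256, 7, 7])
print(pool(torch.randn(0, 256, 14, 14)).shape)   # torch.Size([0, 256, 7, 7])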
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/transformer.py
# Copyright (c) OpenMMLab. All rights reserved. import math import warnings from typing import Sequence import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import (build_activation_layer, build_conv_layer, build_norm_layer, xavier_init) from mmcv.cnn.bricks.registry import (TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE) from mmcv.cnn.bricks.transformer import (BaseTransformerLayer, TransformerLayerSequence, build_transformer_layer_sequence) from mmcv.runner.base_module import BaseModule from mmcv.utils import to_2tuple from torch.nn.init import normal_ from mmdet.models.utils.builder import TRANSFORMER try: from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention except ImportError: warnings.warn( '`MultiScaleDeformableAttention` in MMCV has been moved to ' '`mmcv.ops.multi_scale_deform_attn`, please update your MMCV') from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention def nlc_to_nchw(x, hw_shape): """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. Args: x (Tensor): The input tensor of shape [N, L, C] before conversion. hw_shape (Sequence[int]): The height and width of output feature map. Returns: Tensor: The output tensor of shape [N, C, H, W] after conversion. """ H, W = hw_shape assert len(x.shape) == 3 B, L, C = x.shape assert L == H * W, 'The seq_len does not match H, W' return x.transpose(1, 2).reshape(B, C, H, W).contiguous() def nchw_to_nlc(x): """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. Args: x (Tensor): The input tensor of shape [N, C, H, W] before conversion. Returns: Tensor: The output tensor of shape [N, L, C] after conversion. """ assert len(x.shape) == 4 return x.flatten(2).transpose(1, 2).contiguous() class AdaptivePadding(nn.Module): """Applies padding to input (if needed) so that input can get fully covered by filter you specified. It support two modes "same" and "corner". The "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around input. The "corner" mode would pad zero to bottom right. Args: kernel_size (int | tuple): Size of the kernel: stride (int | tuple): Stride of the filter. Default: 1: dilation (int | tuple): Spacing between kernel elements. Default: 1 padding (str): Support "same" and "corner", "corner" mode would pad zero to bottom right, and "same" mode would pad zero around input. Default: "corner". 
Example: >>> kernel_size = 16 >>> stride = 16 >>> dilation = 1 >>> input = torch.rand(1, 1, 15, 17) >>> adap_pad = AdaptivePadding( >>> kernel_size=kernel_size, >>> stride=stride, >>> dilation=dilation, >>> padding="corner") >>> out = adap_pad(input) >>> assert (out.shape[2], out.shape[3]) == (16, 32) >>> input = torch.rand(1, 1, 16, 17) >>> out = adap_pad(input) >>> assert (out.shape[2], out.shape[3]) == (16, 32) """ def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): super(AdaptivePadding, self).__init__() assert padding in ('same', 'corner') kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) padding = to_2tuple(padding) dilation = to_2tuple(dilation) self.padding = padding self.kernel_size = kernel_size self.stride = stride self.dilation = dilation def get_pad_shape(self, input_shape): input_h, input_w = input_shape kernel_h, kernel_w = self.kernel_size stride_h, stride_w = self.stride output_h = math.ceil(input_h / stride_h) output_w = math.ceil(input_w / stride_w) pad_h = max((output_h - 1) * stride_h + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) pad_w = max((output_w - 1) * stride_w + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) return pad_h, pad_w def forward(self, x): pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) if pad_h > 0 or pad_w > 0: if self.padding == 'corner': x = F.pad(x, [0, pad_w, 0, pad_h]) elif self.padding == 'same': x = F.pad(x, [ pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2 ]) return x class PatchEmbed(BaseModule): """Image to Patch Embedding. We use a conv layer to implement PatchEmbed. Args: in_channels (int): The num of input channels. Default: 3 embed_dims (int): The dimensions of embedding. Default: 768 conv_type (str): The config dict for embedding conv layer type selection. Default: "Conv2d. kernel_size (int): The kernel_size of embedding conv. Default: 16. stride (int): The slide stride of embedding conv. Default: None (Would be set as `kernel_size`). padding (int | tuple | string ): The padding length of embedding conv. When it is a string, it means the mode of adaptive padding, support "same" and "corner" now. Default: "corner". dilation (int): The dilation rate of embedding conv. Default: 1. bias (bool): Bias of embed conv. Default: True. norm_cfg (dict, optional): Config dict for normalization layer. Default: None. input_size (int | tuple | None): The size of input, which will be used to calculate the out size. Only work when `dynamic_size` is False. Default: None. init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. Default: None. 
""" def __init__( self, in_channels=3, embed_dims=768, conv_type='Conv2d', kernel_size=16, stride=16, padding='corner', dilation=1, bias=True, norm_cfg=None, input_size=None, init_cfg=None, ): super(PatchEmbed, self).__init__(init_cfg=init_cfg) self.embed_dims = embed_dims if stride is None: stride = kernel_size kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) dilation = to_2tuple(dilation) if isinstance(padding, str): self.adap_padding = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) # disable the padding of conv padding = 0 else: self.adap_padding = None padding = to_2tuple(padding) self.projection = build_conv_layer( dict(type=conv_type), in_channels=in_channels, out_channels=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) if norm_cfg is not None: self.norm = build_norm_layer(norm_cfg, embed_dims)[1] else: self.norm = None if input_size: input_size = to_2tuple(input_size) # `init_out_size` would be used outside to # calculate the num_patches # when `use_abs_pos_embed` outside self.init_input_size = input_size if self.adap_padding: pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) input_h, input_w = input_size input_h = input_h + pad_h input_w = input_w + pad_w input_size = (input_h, input_w) # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html h_out = (input_size[0] + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // stride[0] + 1 w_out = (input_size[1] + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) // stride[1] + 1 self.init_out_size = (h_out, w_out) else: self.init_input_size = None self.init_out_size = None def forward(self, x): """ Args: x (Tensor): Has shape (B, C, H, W). In most case, C is 3. Returns: tuple: Contains merged results and its spatial shape. - x (Tensor): Has shape (B, out_h * out_w, embed_dims) - out_size (tuple[int]): Spatial shape of x, arrange as (out_h, out_w). """ if self.adap_padding: x = self.adap_padding(x) x = self.projection(x) out_size = (x.shape[2], x.shape[3]) x = x.flatten(2).transpose(1, 2) if self.norm is not None: x = self.norm(x) return x, out_size class PatchMerging(BaseModule): """Merge patch feature map. This layer groups feature map by kernel_size, and applies norm and linear layers to the grouped feature map. Our implementation uses `nn.Unfold` to merge patch, which is about 25% faster than original implementation. Instead, we need to modify pretrained models for compatibility. Args: in_channels (int): The num of input channels. to gets fully covered by filter and stride you specified.. Default: True. out_channels (int): The num of output channels. kernel_size (int | tuple, optional): the kernel size in the unfold layer. Defaults to 2. stride (int | tuple, optional): the stride of the sliding blocks in the unfold layer. Default: None. (Would be set as `kernel_size`) padding (int | tuple | string ): The padding length of embedding conv. When it is a string, it means the mode of adaptive padding, support "same" and "corner" now. Default: "corner". dilation (int | tuple, optional): dilation parameter in the unfold layer. Default: 1. bias (bool, optional): Whether to add bias in linear layer or not. Defaults: False. norm_cfg (dict, optional): Config dict for normalization layer. Default: dict(type='LN'). init_cfg (dict, optional): The extra config for initialization. Default: None. 
""" def __init__(self, in_channels, out_channels, kernel_size=2, stride=None, padding='corner', dilation=1, bias=False, norm_cfg=dict(type='LN'), init_cfg=None): super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.out_channels = out_channels if stride: stride = stride else: stride = kernel_size kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) dilation = to_2tuple(dilation) if isinstance(padding, str): self.adap_padding = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) # disable the padding of unfold padding = 0 else: self.adap_padding = None padding = to_2tuple(padding) self.sampler = nn.Unfold( kernel_size=kernel_size, dilation=dilation, padding=padding, stride=stride) sample_dim = kernel_size[0] * kernel_size[1] * in_channels if norm_cfg is not None: self.norm = build_norm_layer(norm_cfg, sample_dim)[1] else: self.norm = None self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) def forward(self, x, input_size): """ Args: x (Tensor): Has shape (B, H*W, C_in). input_size (tuple[int]): The spatial shape of x, arrange as (H, W). Default: None. Returns: tuple: Contains merged results and its spatial shape. - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) - out_size (tuple[int]): Spatial shape of x, arrange as (Merged_H, Merged_W). """ B, L, C = x.shape assert isinstance(input_size, Sequence), f'Expect ' \ f'input_size is ' \ f'`Sequence` ' \ f'but get {input_size}' H, W = input_size assert L == H * W, 'input feature has wrong size' x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W # Use nn.Unfold to merge patch. About 25% faster than original method, # but need to modify pretrained model for compatibility if self.adap_padding: x = self.adap_padding(x) H, W = x.shape[-2:] x = self.sampler(x) # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * (self.sampler.kernel_size[0] - 1) - 1) // self.sampler.stride[0] + 1 out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * (self.sampler.kernel_size[1] - 1) - 1) // self.sampler.stride[1] + 1 output_size = (out_h, out_w) x = x.transpose(1, 2) # B, H/2*W/2, 4*C x = self.norm(x) if self.norm else x x = self.reduction(x) return x, output_size def inverse_sigmoid(x, eps=1e-5): """Inverse function of sigmoid. Args: x (Tensor): The tensor to do the inverse. eps (float): EPS avoid numerical overflow. Defaults 1e-5. Returns: Tensor: The x has passed the inverse function of sigmoid, has same shape with input. """ x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) @TRANSFORMER_LAYER.register_module() class DetrTransformerDecoderLayer(BaseTransformerLayer): """Implements decoder layer in DETR transformer. Args: attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): Configs for self_attention or cross_attention, the order should be consistent with it in `operation_order`. If it is a dict, it would be expand to the number of attention in `operation_order`. feedforward_channels (int): The hidden dimension for FFNs. ffn_dropout (float): Probability of an element to be zeroed in ffn. Default 0.0. operation_order (tuple[str]): The execution order of operation in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). Default:None act_cfg (dict): The activation config for FFNs. Default: `LN` norm_cfg (dict): Config dict for normalization layer. Default: `LN`. 
ffn_num_fcs (int): The number of fully-connected layers in FFNs. Default:2. """ def __init__(self, attn_cfgs, feedforward_channels, ffn_dropout=0.0, operation_order=None, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN'), ffn_num_fcs=2, **kwargs): super(DetrTransformerDecoderLayer, self).__init__( attn_cfgs=attn_cfgs, feedforward_channels=feedforward_channels, ffn_dropout=ffn_dropout, operation_order=operation_order, act_cfg=act_cfg, norm_cfg=norm_cfg, ffn_num_fcs=ffn_num_fcs, **kwargs) assert len(operation_order) == 6 assert set(operation_order) == set( ['self_attn', 'norm', 'cross_attn', 'ffn']) @TRANSFORMER_LAYER_SEQUENCE.register_module() class DetrTransformerEncoder(TransformerLayerSequence): """TransformerEncoder of DETR. Args: post_norm_cfg (dict): Config of last normalization layer. Default: `LN`. Only used when `self.pre_norm` is `True` """ def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs): super(DetrTransformerEncoder, self).__init__(*args, **kwargs) if post_norm_cfg is not None: self.post_norm = build_norm_layer( post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None else: assert not self.pre_norm, f'Use prenorm in ' \ f'{self.__class__.__name__},' \ f'Please specify post_norm_cfg' self.post_norm = None def forward(self, *args, **kwargs): """Forward function for `TransformerCoder`. Returns: Tensor: forwarded results with shape [num_query, bs, embed_dims]. """ x = super(DetrTransformerEncoder, self).forward(*args, **kwargs) if self.post_norm is not None: x = self.post_norm(x) return x @TRANSFORMER_LAYER_SEQUENCE.register_module() class DetrTransformerDecoder(TransformerLayerSequence): """Implements the decoder in DETR transformer. Args: return_intermediate (bool): Whether to return intermediate outputs. post_norm_cfg (dict): Config of last normalization layer. Default: `LN`. """ def __init__(self, *args, post_norm_cfg=dict(type='LN'), return_intermediate=False, **kwargs): super(DetrTransformerDecoder, self).__init__(*args, **kwargs) self.return_intermediate = return_intermediate if post_norm_cfg is not None: self.post_norm = build_norm_layer(post_norm_cfg, self.embed_dims)[1] else: self.post_norm = None def forward(self, query, *args, **kwargs): """Forward function for `TransformerDecoder`. Args: query (Tensor): Input query with shape `(num_query, bs, embed_dims)`. Returns: Tensor: Results with shape [1, num_query, bs, embed_dims] when return_intermediate is `False`, otherwise it has shape [num_layers, num_query, bs, embed_dims]. """ if not self.return_intermediate: x = super().forward(query, *args, **kwargs) if self.post_norm: x = self.post_norm(x)[None] return x intermediate = [] for layer in self.layers: query = layer(query, *args, **kwargs) if self.return_intermediate: if self.post_norm is not None: intermediate.append(self.post_norm(query)) else: intermediate.append(query) return torch.stack(intermediate) @TRANSFORMER.register_module() class Transformer(BaseModule): """Implements the DETR transformer. Following the official DETR implementation, this module copy-paste from torch.nn.Transformer with modifications: * positional encodings are passed in MultiheadAttention * extra LN at the end of encoder is removed * decoder returns a stack of activations from all decoding layers See `paper: End-to-End Object Detection with Transformers <https://arxiv.org/pdf/2005.12872>`_ for details. Args: encoder (`mmcv.ConfigDict` | Dict): Config of TransformerEncoder. Defaults to None. decoder ((`mmcv.ConfigDict` | Dict)): Config of TransformerDecoder. 
Defaults to None init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. Defaults to None. """ def __init__(self, encoder=None, decoder=None, init_cfg=None): super(Transformer, self).__init__(init_cfg=init_cfg) self.encoder = build_transformer_layer_sequence(encoder) self.decoder = build_transformer_layer_sequence(decoder) self.embed_dims = self.encoder.embed_dims def init_weights(self): # follow the official DETR to init parameters for m in self.modules(): if hasattr(m, 'weight') and m.weight.dim() > 1: xavier_init(m, distribution='uniform') self._is_init = True def forward(self, x, mask, query_embed, pos_embed): """Forward function for `Transformer`. Args: x (Tensor): Input query with shape [bs, c, h, w] where c = embed_dims. mask (Tensor): The key_padding_mask used for encoder and decoder, with shape [bs, h, w]. query_embed (Tensor): The query embedding for decoder, with shape [num_query, c]. pos_embed (Tensor): The positional encoding for encoder and decoder, with the same shape as `x`. Returns: tuple[Tensor]: results of decoder containing the following tensor. - out_dec: Output from decoder. If return_intermediate_dec \ is True output has shape [num_dec_layers, bs, num_query, embed_dims], else has shape [1, bs, \ num_query, embed_dims]. - memory: Output results from encoder, with shape \ [bs, embed_dims, h, w]. """ bs, c, h, w = x.shape # use `view` instead of `flatten` for dynamically exporting to ONNX x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c] pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1) query_embed = query_embed.unsqueeze(1).repeat( 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim] mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w] memory = self.encoder( query=x, key=None, value=None, query_pos=pos_embed, query_key_padding_mask=mask) target = torch.zeros_like(query_embed) # out_dec: [num_layers, num_query, bs, dim] out_dec = self.decoder( query=target, key=memory, value=memory, key_pos=pos_embed, query_pos=query_embed, key_padding_mask=mask) out_dec = out_dec.transpose(1, 2) memory = memory.permute(1, 2, 0).reshape(bs, c, h, w) return out_dec, memory @TRANSFORMER_LAYER_SEQUENCE.register_module() class DeformableDetrTransformerDecoder(TransformerLayerSequence): """Implements the decoder in DETR transformer. Args: return_intermediate (bool): Whether to return intermediate outputs. coder_norm_cfg (dict): Config of last normalization layer. Default: `LN`. """ def __init__(self, *args, return_intermediate=False, **kwargs): super(DeformableDetrTransformerDecoder, self).__init__(*args, **kwargs) self.return_intermediate = return_intermediate def forward(self, query, *args, reference_points=None, valid_ratios=None, reg_branches=None, **kwargs): """Forward function for `TransformerDecoder`. Args: query (Tensor): Input query with shape `(num_query, bs, embed_dims)`. reference_points (Tensor): The reference points of offset. has shape (bs, num_query, 4) when as_two_stage, otherwise has shape ((bs, num_query, 2). valid_ratios (Tensor): The radios of valid points on the feature map, has shape (bs, num_levels, 2) reg_branch: (obj:`nn.ModuleList`): Used for refining the regression results. Only would be passed when with_box_refine is True, otherwise would be passed a `None`. Returns: Tensor: Results with shape [1, num_query, bs, embed_dims] when return_intermediate is `False`, otherwise it has shape [num_layers, num_query, bs, embed_dims]. 
""" output = query intermediate = [] intermediate_reference_points = [] for lid, layer in enumerate(self.layers): if reference_points.shape[-1] == 4: reference_points_input = reference_points[:, :, None] * \ torch.cat([valid_ratios, valid_ratios], -1)[:, None] else: assert reference_points.shape[-1] == 2 reference_points_input = reference_points[:, :, None] * \ valid_ratios[:, None] output = layer( output, *args, reference_points=reference_points_input, **kwargs) output = output.permute(1, 0, 2) if reg_branches is not None: tmp = reg_branches[lid](output) if reference_points.shape[-1] == 4: new_reference_points = tmp + inverse_sigmoid( reference_points) new_reference_points = new_reference_points.sigmoid() else: assert reference_points.shape[-1] == 2 new_reference_points = tmp new_reference_points[..., :2] = tmp[ ..., :2] + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() reference_points = new_reference_points.detach() output = output.permute(1, 0, 2) if self.return_intermediate: intermediate.append(output) intermediate_reference_points.append(reference_points) if self.return_intermediate: return torch.stack(intermediate), torch.stack( intermediate_reference_points) return output, reference_points @TRANSFORMER.register_module() class DeformableDetrTransformer(Transformer): """Implements the DeformableDETR transformer. Args: as_two_stage (bool): Generate query from encoder features. Default: False. num_feature_levels (int): Number of feature maps from FPN: Default: 4. two_stage_num_proposals (int): Number of proposals when set `as_two_stage` as True. Default: 300. """ def __init__(self, as_two_stage=False, num_feature_levels=4, two_stage_num_proposals=300, **kwargs): super(DeformableDetrTransformer, self).__init__(**kwargs) self.as_two_stage = as_two_stage self.num_feature_levels = num_feature_levels self.two_stage_num_proposals = two_stage_num_proposals self.embed_dims = self.encoder.embed_dims self.init_layers() def init_layers(self): """Initialize layers of the DeformableDetrTransformer.""" self.level_embeds = nn.Parameter( torch.Tensor(self.num_feature_levels, self.embed_dims)) if self.as_two_stage: self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) self.enc_output_norm = nn.LayerNorm(self.embed_dims) self.pos_trans = nn.Linear(self.embed_dims * 2, self.embed_dims * 2) self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2) else: self.reference_points = nn.Linear(self.embed_dims, 2) def init_weights(self): """Initialize the transformer weights.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules(): if isinstance(m, MultiScaleDeformableAttention): m.init_weights() if not self.as_two_stage: xavier_init(self.reference_points, distribution='uniform', bias=0.) normal_(self.level_embeds) def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes): """Generate proposals from encoded memory. Args: memory (Tensor) : The output of encoder, has shape (bs, num_key, embed_dim). num_key is equal the number of points on feature map from all level. memory_padding_mask (Tensor): Padding mask for memory. has shape (bs, num_key). spatial_shapes (Tensor): The shape of all feature maps. has shape (num_level, 2). Returns: tuple: A tuple of feature map and bbox prediction. - output_memory (Tensor): The input of decoder, \ has shape (bs, num_key, embed_dim). num_key is \ equal the number of points on feature map from \ all levels. 
- output_proposals (Tensor): The normalized proposal \ after a inverse sigmoid, has shape \ (bs, num_keys, 4). """ N, S, C = memory.shape proposals = [] _cur = 0 for lvl, (H, W) in enumerate(spatial_shapes): mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view( N, H, W, 1) valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) grid_y, grid_x = torch.meshgrid( torch.linspace( 0, H - 1, H, dtype=torch.float32, device=memory.device), torch.linspace( 0, W - 1, W, dtype=torch.float32, device=memory.device)) grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2) grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) proposal = torch.cat((grid, wh), -1).view(N, -1, 4) proposals.append(proposal) _cur += (H * W) output_proposals = torch.cat(proposals, 1) output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all( -1, keepdim=True) output_proposals = torch.log(output_proposals / (1 - output_proposals)) output_proposals = output_proposals.masked_fill( memory_padding_mask.unsqueeze(-1), float('inf')) output_proposals = output_proposals.masked_fill( ~output_proposals_valid, float('inf')) output_memory = memory output_memory = output_memory.masked_fill( memory_padding_mask.unsqueeze(-1), float(0)) output_memory = output_memory.masked_fill(~output_proposals_valid, float(0)) output_memory = self.enc_output_norm(self.enc_output(output_memory)) return output_memory, output_proposals @staticmethod def get_reference_points(spatial_shapes, valid_ratios, device): """Get the reference points used in decoder. Args: spatial_shapes (Tensor): The shape of all feature maps, has shape (num_level, 2). valid_ratios (Tensor): The radios of valid points on the feature map, has shape (bs, num_levels, 2) device (obj:`device`): The device where reference_points should be. Returns: Tensor: reference points used in decoder, has \ shape (bs, num_keys, num_levels, 2). 
""" reference_points_list = [] for lvl, (H, W) in enumerate(spatial_shapes): # TODO check this 0.5 ref_y, ref_x = torch.meshgrid( torch.linspace( 0.5, H - 0.5, H, dtype=torch.float32, device=device), torch.linspace( 0.5, W - 0.5, W, dtype=torch.float32, device=device)) ref_y = ref_y.reshape(-1)[None] / ( valid_ratios[:, None, lvl, 1] * H) ref_x = ref_x.reshape(-1)[None] / ( valid_ratios[:, None, lvl, 0] * W) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points def get_valid_ratio(self, mask): """Get the valid radios of feature maps of all level.""" _, H, W = mask.shape valid_H = torch.sum(~mask[:, :, 0], 1) valid_W = torch.sum(~mask[:, 0, :], 1) valid_ratio_h = valid_H.float() / H valid_ratio_w = valid_W.float() / W valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) return valid_ratio def get_proposal_pos_embed(self, proposals, num_pos_feats=128, temperature=10000): """Get the position embedding of proposal.""" scale = 2 * math.pi dim_t = torch.arange( num_pos_feats, dtype=torch.float32, device=proposals.device) dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) # N, L, 4 proposals = proposals.sigmoid() * scale # N, L, 4, 128 pos = proposals[:, :, :, None] / dim_t # N, L, 4, 64, 2 pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2) return pos def forward(self, mlvl_feats, mlvl_masks, query_embed, mlvl_pos_embeds, reg_branches=None, cls_branches=None, **kwargs): """Forward function for `Transformer`. Args: mlvl_feats (list(Tensor)): Input queries from different level. Each element has shape [bs, embed_dims, h, w]. mlvl_masks (list(Tensor)): The key_padding_mask from different level used for encoder and decoder, each element has shape [bs, h, w]. query_embed (Tensor): The query embedding for decoder, with shape [num_query, c]. mlvl_pos_embeds (list(Tensor)): The positional encoding of feats from different level, has the shape [bs, embed_dims, h, w]. reg_branches (obj:`nn.ModuleList`): Regression heads for feature maps from each decoder layer. Only would be passed when `with_box_refine` is True. Default to None. cls_branches (obj:`nn.ModuleList`): Classification heads for feature maps from each decoder layer. Only would be passed when `as_two_stage` is True. Default to None. Returns: tuple[Tensor]: results of decoder containing the following tensor. - inter_states: Outputs from decoder. If return_intermediate_dec is True output has shape \ (num_dec_layers, bs, num_query, embed_dims), else has \ shape (1, bs, num_query, embed_dims). - init_reference_out: The initial value of reference \ points, has shape (bs, num_queries, 4). - inter_references_out: The internal value of reference \ points in decoder, has shape \ (num_dec_layers, bs,num_query, embed_dims) - enc_outputs_class: The classification score of \ proposals generated from \ encoder's feature maps, has shape \ (batch, h*w, num_classes). \ Only would be returned when `as_two_stage` is True, \ otherwise None. - enc_outputs_coord_unact: The regression results \ generated from encoder's feature maps., has shape \ (batch, h*w, 4). Only would \ be returned when `as_two_stage` is True, \ otherwise None. 
""" assert self.as_two_stage or query_embed is not None feat_flatten = [] mask_flatten = [] lvl_pos_embed_flatten = [] spatial_shapes = [] for lvl, (feat, mask, pos_embed) in enumerate( zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): bs, c, h, w = feat.shape spatial_shape = (h, w) spatial_shapes.append(spatial_shape) feat = feat.flatten(2).transpose(1, 2) mask = mask.flatten(1) pos_embed = pos_embed.flatten(2).transpose(1, 2) lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) lvl_pos_embed_flatten.append(lvl_pos_embed) feat_flatten.append(feat) mask_flatten.append(mask) feat_flatten = torch.cat(feat_flatten, 1) mask_flatten = torch.cat(mask_flatten, 1) lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) spatial_shapes = torch.as_tensor( spatial_shapes, dtype=torch.long, device=feat_flatten.device) level_start_index = torch.cat((spatial_shapes.new_zeros( (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) valid_ratios = torch.stack( [self.get_valid_ratio(m) for m in mlvl_masks], 1) reference_points = \ self.get_reference_points(spatial_shapes, valid_ratios, device=feat.device) feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( 1, 0, 2) # (H*W, bs, embed_dims) memory = self.encoder( query=feat_flatten, key=None, value=None, query_pos=lvl_pos_embed_flatten, query_key_padding_mask=mask_flatten, spatial_shapes=spatial_shapes, reference_points=reference_points, level_start_index=level_start_index, valid_ratios=valid_ratios, **kwargs) memory = memory.permute(1, 0, 2) bs, _, c = memory.shape if self.as_two_stage: output_memory, output_proposals = \ self.gen_encoder_output_proposals( memory, mask_flatten, spatial_shapes) enc_outputs_class = cls_branches[self.decoder.num_layers]( output_memory) enc_outputs_coord_unact = \ reg_branches[ self.decoder.num_layers](output_memory) + output_proposals topk = self.two_stage_num_proposals topk_proposals = torch.topk( enc_outputs_class[..., 0], topk, dim=1)[1] topk_coords_unact = torch.gather( enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) topk_coords_unact = topk_coords_unact.detach() reference_points = topk_coords_unact.sigmoid() init_reference_out = reference_points pos_trans_out = self.pos_trans_norm( self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))) query_pos, query = torch.split(pos_trans_out, c, dim=2) else: query_pos, query = torch.split(query_embed, c, dim=1) query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) query = query.unsqueeze(0).expand(bs, -1, -1) reference_points = self.reference_points(query_pos).sigmoid() init_reference_out = reference_points # decoder query = query.permute(1, 0, 2) memory = memory.permute(1, 0, 2) query_pos = query_pos.permute(1, 0, 2) inter_states, inter_references = self.decoder( query=query, key=None, value=memory, query_pos=query_pos, key_padding_mask=mask_flatten, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, reg_branches=reg_branches, **kwargs) inter_references_out = inter_references if self.as_two_stage: return inter_states, init_reference_out,\ inter_references_out, enc_outputs_class,\ enc_outputs_coord_unact return inter_states, init_reference_out, \ inter_references_out, None, None @TRANSFORMER.register_module() class DynamicConv(BaseModule): """Implements Dynamic Convolution. This module generate parameters for each sample and use bmm to implement 1*1 convolution. 
Code is modified from the `official github repo <https://github.com/PeizeSun/ SparseR-CNN/blob/main/projects/SparseRCNN/sparsercnn/head.py#L258>`_ . Args: in_channels (int): The input feature channel. Defaults to 256. feat_channels (int): The inner feature channel. Defaults to 64. out_channels (int, optional): The output feature channel. When not specified, it will be set to `in_channels` by default input_feat_shape (int): The shape of input feature. Defaults to 7. with_proj (bool): Project two-dimentional feature to one-dimentional feature. Default to True. act_cfg (dict): The activation config for DynamicConv. norm_cfg (dict): Config dict for normalization layer. Default layer normalization. init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. Default: None. """ def __init__(self, in_channels=256, feat_channels=64, out_channels=None, input_feat_shape=7, with_proj=True, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN'), init_cfg=None): super(DynamicConv, self).__init__(init_cfg) self.in_channels = in_channels self.feat_channels = feat_channels self.out_channels_raw = out_channels self.input_feat_shape = input_feat_shape self.with_proj = with_proj self.act_cfg = act_cfg self.norm_cfg = norm_cfg self.out_channels = out_channels if out_channels else in_channels self.num_params_in = self.in_channels * self.feat_channels self.num_params_out = self.out_channels * self.feat_channels self.dynamic_layer = nn.Linear( self.in_channels, self.num_params_in + self.num_params_out) self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1] self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1] self.activation = build_activation_layer(act_cfg) num_output = self.out_channels * input_feat_shape**2 if self.with_proj: self.fc_layer = nn.Linear(num_output, self.out_channels) self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1] def forward(self, param_feature, input_feature): """Forward function for `DynamicConv`. Args: param_feature (Tensor): The feature can be used to generate the parameter, has shape (num_all_proposals, in_channels). input_feature (Tensor): Feature that interact with parameters, has shape (num_all_proposals, in_channels, H, W). Returns: Tensor: The output feature has shape (num_all_proposals, out_channels). """ input_feature = input_feature.flatten(2).permute(2, 0, 1) input_feature = input_feature.permute(1, 0, 2) parameters = self.dynamic_layer(param_feature) param_in = parameters[:, :self.num_params_in].view( -1, self.in_channels, self.feat_channels) param_out = parameters[:, -self.num_params_out:].view( -1, self.feat_channels, self.out_channels) # input_feature has shape (num_all_proposals, H*W, in_channels) # param_in has shape (num_all_proposals, in_channels, feat_channels) # feature has shape (num_all_proposals, H*W, feat_channels) features = torch.bmm(input_feature, param_in) features = self.norm_in(features) features = self.activation(features) # param_out has shape (batch_size, feat_channels, out_channels) features = torch.bmm(features, param_out) features = self.norm_out(features) features = self.activation(features) if self.with_proj: features = features.flatten(1) features = self.fc_layer(features) features = self.fc_norm(features) features = self.activation(features) return features
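# --- Usage sketch (added for this dump, not part of the upstream file) ---
# A minimal check of the shapes documented above: DynamicConv turns each
# per-proposal parameter vector into a pair of 1x1-conv weight matrices and
# applies them to the flattened 7x7 RoI features. The proposal count of 100
# is an arbitrary assumption for illustration.
if __name__ == '__main__':
    import torch

    dynamic_conv = DynamicConv(
        in_channels=256, feat_channels=64, input_feat_shape=7, with_proj=True)
    num_proposals = 100
    param_feature = torch.rand(num_proposals, 256)        # (N, in_channels)
    input_feature = torch.rand(num_proposals, 256, 7, 7)  # (N, in_channels, H, W)
    out = dynamic_conv(param_feature, input_feature)
    assert out.shape == (num_proposals, 256)              # (N, out_channels)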
45,903
38.572414
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/make_divisible.py
# Copyright (c) OpenMMLab. All rights reserved. def make_divisible(value, divisor, min_value=None, min_ratio=0.9): """Make divisible function. This function rounds the channel number to the nearest value that can be divisible by the divisor. It is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by divisor. It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa Args: value (int): The original channel number. divisor (int): The divisor to fully divide the channel number. min_value (int): The minimum value of the output channel. Default: None, means that the minimum value equal to the divisor. min_ratio (float): The minimum ratio of the rounded channel number to the original channel number. Default: 0.9. Returns: int: The modified output channel number. """ if min_value is None: min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than (1-min_ratio). if new_value < min_ratio * value: new_value += divisor return new_value
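# --- Illustrative checks (added for this dump, not part of the upstream file) ---
# Round a few channel counts to multiples of 8; the last case shows the
# min_ratio guard bumping the result up so it never drops below 90% of the
# original value.
if __name__ == '__main__':
    assert make_divisible(32, 8) == 32  # already divisible
    assert make_divisible(33, 8) == 32  # rounds to the nearest multiple
    assert make_divisible(27, 8) == 32  # 24 < 0.9 * 27, so one divisor is added back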
1,279
43.137931
116
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/positional_encoding.py
# Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn from mmcv.cnn.bricks.transformer import POSITIONAL_ENCODING from mmcv.runner import BaseModule @POSITIONAL_ENCODING.register_module() class SinePositionalEncoding(BaseModule): """Position encoding with sine and cosine functions. See `End-to-End Object Detection with Transformers <https://arxiv.org/pdf/2005.12872>`_ for details. Args: num_feats (int): The feature dimension for each position along x-axis or y-axis. Note the final returned dimension for each position is 2 times of this value. temperature (int, optional): The temperature used for scaling the position embedding. Defaults to 10000. normalize (bool, optional): Whether to normalize the position embedding. Defaults to False. scale (float, optional): A scale factor that scales the position embedding. The scale will be used only when `normalize` is True. Defaults to 2*pi. eps (float, optional): A value added to the denominator for numerical stability. Defaults to 1e-6. offset (float): offset add to embed when do the normalization. Defaults to 0. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_feats, temperature=10000, normalize=False, scale=2 * math.pi, eps=1e-6, offset=0., init_cfg=None): super(SinePositionalEncoding, self).__init__(init_cfg) if normalize: assert isinstance(scale, (float, int)), 'when normalize is set,' \ 'scale should be provided and in float or int type, ' \ f'found {type(scale)}' self.num_feats = num_feats self.temperature = temperature self.normalize = normalize self.scale = scale self.eps = eps self.offset = offset def forward(self, mask): """Forward function for `SinePositionalEncoding`. Args: mask (Tensor): ByteTensor mask. Non-zero values representing ignored positions, while zero values means valid positions for this image. Shape [bs, h, w]. Returns: pos (Tensor): Returned position embedding with shape [bs, num_feats*2, h, w]. """ # For convenience of exporting to ONNX, it's required to convert # `masks` from bool to int. mask = mask.to(torch.int) not_mask = 1 - mask # logical_not y_embed = not_mask.cumsum(1, dtype=torch.float32) x_embed = not_mask.cumsum(2, dtype=torch.float32) if self.normalize: y_embed = (y_embed + self.offset) / \ (y_embed[:, -1:, :] + self.eps) * self.scale x_embed = (x_embed + self.offset) / \ (x_embed[:, :, -1:] + self.eps) * self.scale dim_t = torch.arange( self.num_feats, dtype=torch.float32, device=mask.device) dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t # use `view` instead of `flatten` for dynamically exporting to ONNX B, H, W = mask.size() pos_x = torch.stack( (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).view(B, H, W, -1) pos_y = torch.stack( (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).view(B, H, W, -1) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos def __repr__(self): """str: a string that describes the module""" repr_str = self.__class__.__name__ repr_str += f'(num_feats={self.num_feats}, ' repr_str += f'temperature={self.temperature}, ' repr_str += f'normalize={self.normalize}, ' repr_str += f'scale={self.scale}, ' repr_str += f'eps={self.eps})' return repr_str @POSITIONAL_ENCODING.register_module() class LearnedPositionalEncoding(BaseModule): """Position embedding with learnable embedding weights. 
Args: num_feats (int): The feature dimension for each position along x-axis or y-axis. The final returned dimension for each position is 2 times of this value. row_num_embed (int, optional): The dictionary size of row embeddings. Default 50. col_num_embed (int, optional): The dictionary size of col embeddings. Default 50. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, num_feats, row_num_embed=50, col_num_embed=50, init_cfg=dict(type='Uniform', layer='Embedding')): super(LearnedPositionalEncoding, self).__init__(init_cfg) self.row_embed = nn.Embedding(row_num_embed, num_feats) self.col_embed = nn.Embedding(col_num_embed, num_feats) self.num_feats = num_feats self.row_num_embed = row_num_embed self.col_num_embed = col_num_embed def forward(self, mask): """Forward function for `LearnedPositionalEncoding`. Args: mask (Tensor): ByteTensor mask. Non-zero values representing ignored positions, while zero values means valid positions for this image. Shape [bs, h, w]. Returns: pos (Tensor): Returned position embedding with shape [bs, num_feats*2, h, w]. """ h, w = mask.shape[-2:] x = torch.arange(w, device=mask.device) y = torch.arange(h, device=mask.device) x_embed = self.col_embed(x) y_embed = self.row_embed(y) pos = torch.cat( (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat( 1, w, 1)), dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1) return pos def __repr__(self): """str: a string that describes the module""" repr_str = self.__class__.__name__ repr_str += f'(num_feats={self.num_feats}, ' repr_str += f'row_num_embed={self.row_num_embed}, ' repr_str += f'col_num_embed={self.col_num_embed})' return repr_str
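# --- Usage sketch (added for this dump, not part of the upstream file) ---
# Build the sine encoding for a padded two-image batch and verify the output
# layout documented above; num_feats=128 yields the usual 256-d embedding.
# The 32x32 mask size is an arbitrary assumption for illustration.
if __name__ == '__main__':
    pos_enc = SinePositionalEncoding(num_feats=128, normalize=True)
    mask = torch.zeros(2, 32, 32, dtype=torch.uint8)  # 0 = valid, 1 = padded
    mask[1, :, 24:] = 1  # the second image is padded on the right
    pos = pos_enc(mask)
    assert pos.shape == (2, 256, 32, 32)  # [bs, num_feats * 2, h, w]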
6,568
39.054878
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/inverted_residual.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.utils.checkpoint as cp from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from .se_layer import SELayer class InvertedResidual(BaseModule): """Inverted Residual Block. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. mid_channels (int): The input channels of the depthwise convolution. kernel_size (int): The kernel size of the depthwise convolution. Default: 3. stride (int): The stride of the depthwise convolution. Default: 1. se_cfg (dict): Config dict for se layer. Default: None, which means no se layer. with_expand_conv (bool): Use expand conv or not. If set False, mid_channels must be the same with in_channels. Default: True. conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU'). with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Returns: Tensor: The output tensor. """ def __init__(self, in_channels, out_channels, mid_channels, kernel_size=3, stride=1, se_cfg=None, with_expand_conv=True, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), with_cp=False, init_cfg=None): super(InvertedResidual, self).__init__(init_cfg) self.with_res_shortcut = (stride == 1 and in_channels == out_channels) assert stride in [1, 2], f'stride must in [1, 2]. ' \ f'But received {stride}.' self.with_cp = with_cp self.with_se = se_cfg is not None self.with_expand_conv = with_expand_conv if self.with_se: assert isinstance(se_cfg, dict) if not self.with_expand_conv: assert mid_channels == in_channels if self.with_expand_conv: self.expand_conv = ConvModule( in_channels=in_channels, out_channels=mid_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.depthwise_conv = ConvModule( in_channels=mid_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=mid_channels, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) if self.with_se: self.se = SELayer(**se_cfg) self.linear_conv = ConvModule( in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) def forward(self, x): def _inner_forward(x): out = x if self.with_expand_conv: out = self.expand_conv(out) out = self.depthwise_conv(out) if self.with_se: out = self.se(out) out = self.linear_conv(out) if self.with_res_shortcut: return x + out else: return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) return out
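# --- Usage sketch (added for this dump, not part of the upstream file) ---
# A MobileNet-style block with 32 -> 96 -> 32 expansion; with stride 1 and
# matching in/out channels the residual shortcut is active and the spatial
# size is preserved. The 56x56 input size is an arbitrary assumption.
if __name__ == '__main__':
    import torch

    block = InvertedResidual(
        in_channels=32,
        out_channels=32,
        mid_channels=96,
        kernel_size=3,
        stride=1)
    block.eval()  # avoid BN batch statistics on a single sample
    out = block(torch.rand(1, 32, 56, 56))
    assert out.shape == (1, 32, 56, 56)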
4,095
31.768
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d from .builder import build_linear_layer, build_transformer from .ckpt_convert import pvt_convert from .conv_upsample import ConvUpsample from .csp_layer import CSPLayer from .gaussian_target import gaussian_radius, gen_gaussian_target from .inverted_residual import InvertedResidual from .make_divisible import make_divisible from .misc import interpolate_as from .normed_predictor import NormedConv2d, NormedLinear from .positional_encoding import (LearnedPositionalEncoding, SinePositionalEncoding) from .res_layer import ResLayer, SimplifiedBasicBlock from .se_layer import SELayer from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer, DynamicConv, PatchEmbed, Transformer, nchw_to_nlc, nlc_to_nchw) __all__ = [ 'ResLayer', 'gaussian_radius', 'gen_gaussian_target', 'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer', 'build_transformer', 'build_linear_layer', 'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual', 'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'pvt_convert' ]
1,468
47.966667
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/utils/builder.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.utils import Registry, build_from_cfg TRANSFORMER = Registry('Transformer') LINEAR_LAYERS = Registry('linear layers') def build_transformer(cfg, default_args=None): """Builder for Transformer.""" return build_from_cfg(cfg, TRANSFORMER, default_args) LINEAR_LAYERS.register_module('Linear', module=nn.Linear) def build_linear_layer(cfg, *args, **kwargs): """Build linear layer. Args: cfg (None or dict): The linear layer config, which should contain: - type (str): Layer type. - layer args: Args needed to instantiate an linear layer. args (argument list): Arguments passed to the `__init__` method of the corresponding linear layer. kwargs (keyword arguments): Keyword arguments passed to the `__init__` method of the corresponding linear layer. Returns: nn.Module: Created linear layer. """ if cfg is None: cfg_ = dict(type='Linear') else: if not isinstance(cfg, dict): raise TypeError('cfg must be a dict') if 'type' not in cfg: raise KeyError('the cfg dict must contain the key "type"') cfg_ = cfg.copy() layer_type = cfg_.pop('type') if layer_type not in LINEAR_LAYERS: raise KeyError(f'Unrecognized linear type {layer_type}') else: linear_layer = LINEAR_LAYERS.get(layer_type) layer = linear_layer(*args, **kwargs, **cfg_) return layer
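# --- Usage sketch (added for this dump, not part of the upstream file) ---
# Build the default nn.Linear through the registry; any custom layer
# registered in LINEAR_LAYERS would be selected the same way via cfg['type'].
if __name__ == '__main__':
    fc = build_linear_layer(dict(type='Linear'), 1024, 256)
    assert isinstance(fc, nn.Linear)
    assert fc.in_features == 1024 and fc.out_features == 256
    # cfg=None falls back to the plain Linear layer as well
    assert isinstance(build_linear_layer(None, 1024, 256), nn.Linear)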
1,535
31
78
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/standard_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler from ..builder import HEADS, build_head, build_roi_extractor from .base_roi_head import BaseRoIHead from .test_mixins import BBoxTestMixin, MaskTestMixin @HEADS.register_module() class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): """Simplest base roi head including one bbox head and one mask head.""" def init_assigner_sampler(self): """Initialize assigner and sampler.""" self.bbox_assigner = None self.bbox_sampler = None if self.train_cfg: self.bbox_assigner = build_assigner(self.train_cfg.assigner) self.bbox_sampler = build_sampler( self.train_cfg.sampler, context=self) def init_bbox_head(self, bbox_roi_extractor, bbox_head): """Initialize ``bbox_head``""" self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor) self.bbox_head = build_head(bbox_head) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize ``mask_head``""" if mask_roi_extractor is not None: self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor self.mask_head = build_head(mask_head) def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: bbox_results = self._bbox_forward(x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # mask head if self.with_mask: mask_rois = rois[:100] mask_results = self._mask_forward(x, mask_rois) outs = outs + (mask_results['mask_pred'], ) return outs def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, **kwargs): """ Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposals (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. 
Returns: dict[str, Tensor]: a dictionary of loss components """ # assign gts and sample proposals if self.with_bbox or self.with_mask: num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = self.bbox_assigner.assign( proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = self.bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) losses = dict() # bbox head forward and loss if self.with_bbox: bbox_results = self._bbox_forward_train(x, sampling_results, gt_bboxes, gt_labels, img_metas) losses.update(bbox_results['loss_bbox']) # mask head forward and loss if self.with_mask: mask_results = self._mask_forward_train(x, sampling_results, bbox_results['bbox_feats'], gt_masks, img_metas) losses.update(mask_results['loss_mask']) return losses def _bbox_forward(self, x, rois): """Box head forward function used in both training and testing.""" # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) return bbox_results def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas): """Run forward function and calculate loss for box head in training.""" rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward(x, rois) bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, gt_labels, self.train_cfg) loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update(loss_bbox=loss_bbox) return bbox_results def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, img_metas): """Run forward function and calculate loss for mask head in training.""" if not self.share_roi_extractor: pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_results = self._mask_forward(x, pos_rois) else: pos_inds = [] device = bbox_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_results = self._mask_forward( x, pos_inds=pos_inds, bbox_feats=bbox_feats) mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, self.train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head.loss(mask_results['mask_pred'], mask_targets, pos_labels) mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets) return mask_results def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): """Mask head forward function used in both training and testing.""" assert ((rois is not None) ^ (pos_inds is not None and bbox_feats is not None)) if rois is not None: mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) else: assert bbox_feats is not None mask_feats = bbox_feats[pos_inds] mask_pred = self.mask_head(mask_feats) mask_results = dict(mask_pred=mask_pred, 
mask_feats=mask_feats) return mask_results async def async_simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Async test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = await self.async_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=rescale) bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) if not self.with_mask: return bbox_results else: segm_results = await self.async_test_mask( x, img_metas, det_bboxes, det_labels, rescale=rescale, mask_test_cfg=self.test_cfg.get('mask')) return bbox_results, segm_results def simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Test without augmentation. Args: x (tuple[Tensor]): Features from upstream network. Each has shape (batch_size, c, h, w). proposal_list (list(Tensor)): Proposals from rpn head. Each has shape (num_proposals, 5), last dimension 5 represent (x1, y1, x2, y2, score). img_metas (list[dict]): Meta information of images. rescale (bool): Whether to rescale the results to the original image. Default: True. Returns: list[list[np.ndarray]] or list[tuple]: When no mask branch, it is bbox results of each image and classes with type `list[list[np.ndarray]]`. The outer list corresponds to each image. The inner list corresponds to each class. When the model has mask branch, it contains bbox results and mask results. The outer list corresponds to each image, and first element of tuple is bbox results, second element is mask results. """ assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = self.simple_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=rescale) bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head.num_classes) for i in range(len(det_bboxes)) ] if not self.with_mask: return bbox_results else: segm_results = self.simple_test_mask( x, img_metas, det_bboxes, det_labels, rescale=rescale) return list(zip(bbox_results, segm_results)) def aug_test(self, x, proposal_list, img_metas, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas, proposal_list, self.test_cfg) if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) bbox_results = bbox2result(_det_bboxes, det_labels, self.bbox_head.num_classes) # det_bboxes always keep the original scale if self.with_mask: segm_results = self.aug_test_mask(x, img_metas, det_bboxes, det_labels) return [(bbox_results, segm_results)] else: return [bbox_results] def onnx_export(self, x, proposals, img_metas, rescale=False): """Test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = self.bbox_onnx_export( x, img_metas, proposals, self.test_cfg, rescale=rescale) if not self.with_mask: return det_bboxes, det_labels else: segm_results = self.mask_onnx_export( x, img_metas, det_bboxes, det_labels, rescale=rescale) return det_bboxes, det_labels, segm_results def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs): """Export mask branch to onnx which supports batch inference. Args: x (tuple[Tensor]): Feature maps of all scale level. img_metas (list[dict]): Image meta info. det_bboxes (Tensor): Bboxes and corresponding scores. has shape [N, num_bboxes, 5]. 
det_labels (Tensor): class labels of shape [N, num_bboxes]. Returns: Tensor: The segmentation results of shape [N, num_bboxes, image_height, image_width]. """ # image shapes of images in the batch if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): raise RuntimeError('[ONNX Error] Can not record MaskHead ' 'as it has not been executed this time') batch_size = det_bboxes.size(0) # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. det_bboxes = det_bboxes[..., :4] batch_index = torch.arange( det_bboxes.size(0), device=det_bboxes.device).float().view( -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1) mask_rois = torch.cat([batch_index, det_bboxes], dim=-1) mask_rois = mask_rois.view(-1, 5) mask_results = self._mask_forward(x, mask_rois) mask_pred = mask_results['mask_pred'] max_shape = img_metas[0]['img_shape_for_onnx'] num_det = det_bboxes.shape[1] det_bboxes = det_bboxes.reshape(-1, 4) det_labels = det_labels.reshape(-1) segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes, det_labels, self.test_cfg, max_shape) segm_results = segm_results.reshape(batch_size, num_det, max_shape[0], max_shape[1]) return segm_results def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg, **kwargs): """Export bbox branch to onnx which supports batch inference. Args: x (tuple[Tensor]): Feature maps of all scale level. img_metas (list[dict]): Image meta info. proposals (Tensor): Region proposals with batch dimension, has shape [N, num_bboxes, 5]. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. Returns: tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5] and class labels of shape [N, num_bboxes]. """ # get origin input shape to support onnx dynamic input shape assert len( img_metas ) == 1, 'Only support one input image while in exporting to ONNX' img_shapes = img_metas[0]['img_shape_for_onnx'] rois = proposals batch_index = torch.arange( rois.size(0), device=rois.device).float().view(-1, 1, 1).expand( rois.size(0), rois.size(1), 1) rois = torch.cat([batch_index, rois[..., :4]], dim=-1) batch_size = rois.shape[0] num_proposals_per_img = rois.shape[1] # Eliminate the batch dimension rois = rois.view(-1, 5) bbox_results = self._bbox_forward(x, rois) cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] # Recover the batch dimension rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) cls_score = cls_score.reshape(batch_size, num_proposals_per_img, cls_score.size(-1)) bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, bbox_pred.size(-1)) det_bboxes, det_labels = self.bbox_head.onnx_export( rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg) return det_bboxes, det_labels
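# --- Shape sketch (added for this dump, not part of the upstream file) ---
# The head consumes RoIs in the (batch_ind, x1, y1, x2, y2) layout produced
# by bbox2roi (imported from mmdet.core at the top of this file): per-image
# proposal tensors of shape (n_i, 4) are packed into one (sum n_i, 5) tensor
# before _bbox_forward / _mask_forward are called.
if __name__ == '__main__':
    proposals_img0 = torch.tensor([[10., 10., 50., 60.],
                                   [20., 30., 80., 90.]])
    proposals_img1 = torch.tensor([[5., 15., 40., 45.]])
    rois = bbox2roi([proposals_img0, proposals_img1])
    assert rois.shape == (3, 5)
    assert rois[:, 0].tolist() == [0., 0., 1.]  # leading column = image index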
17,132
42.047739
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/grid_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmdet.core import bbox2result, bbox2roi from ..builder import HEADS, build_head, build_roi_extractor from .standard_roi_head import StandardRoIHead @HEADS.register_module() class GridRoIHead(StandardRoIHead): """Grid roi head for Grid R-CNN. https://arxiv.org/abs/1811.12030 """ def __init__(self, grid_roi_extractor, grid_head, **kwargs): assert grid_head is not None super(GridRoIHead, self).__init__(**kwargs) if grid_roi_extractor is not None: self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.grid_roi_extractor = self.bbox_roi_extractor self.grid_head = build_head(grid_head) def _random_jitter(self, sampling_results, img_metas, amplitude=0.15): """Ramdom jitter positive proposals for training.""" for sampling_result, img_meta in zip(sampling_results, img_metas): bboxes = sampling_result.pos_bboxes random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_( -amplitude, amplitude) # before jittering cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2 wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs() # after jittering new_cxcy = cxcy + wh * random_offsets[:, :2] new_wh = wh * (1 + random_offsets[:, 2:]) # xywh to xyxy new_x1y1 = (new_cxcy - new_wh / 2) new_x2y2 = (new_cxcy + new_wh / 2) new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1) # clip bboxes max_shape = img_meta['img_shape'] if max_shape is not None: new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1) new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1) sampling_result.pos_bboxes = new_bboxes return sampling_results def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: bbox_results = self._bbox_forward(x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # grid head grid_rois = rois[:100] grid_feats = self.grid_roi_extractor( x[:self.grid_roi_extractor.num_inputs], grid_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) grid_pred = self.grid_head(grid_feats) outs = outs + (grid_pred, ) # mask head if self.with_mask: mask_rois = rois[:100] mask_results = self._mask_forward(x, mask_rois) outs = outs + (mask_results['mask_pred'], ) return outs def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas): """Run forward function and calculate loss for box head in training.""" bbox_results = super(GridRoIHead, self)._bbox_forward_train(x, sampling_results, gt_bboxes, gt_labels, img_metas) # Grid head forward and loss sampling_results = self._random_jitter(sampling_results, img_metas) pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) # GN in head does not support zero shape input if pos_rois.shape[0] == 0: return bbox_results grid_feats = self.grid_roi_extractor( x[:self.grid_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) # Accelerate training max_sample_num_grid = self.train_cfg.get('max_num_grid', 192) sample_idx = torch.randperm( grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid )] grid_feats = grid_feats[sample_idx] grid_pred = self.grid_head(grid_feats) grid_targets = self.grid_head.get_targets(sampling_results, self.train_cfg) grid_targets = grid_targets[sample_idx] loss_grid = self.grid_head.loss(grid_pred, grid_targets) bbox_results['loss_bbox'].update(loss_grid) return bbox_results def simple_test(self, x, 
proposal_list, img_metas, proposals=None, rescale=False): """Test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' det_bboxes, det_labels = self.simple_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=False) # pack rois into bboxes grid_rois = bbox2roi([det_bbox[:, :4] for det_bbox in det_bboxes]) if grid_rois.shape[0] != 0: grid_feats = self.grid_roi_extractor( x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois) self.grid_head.test_mode = True grid_pred = self.grid_head(grid_feats) # split batch grid head prediction back to each image num_roi_per_img = tuple(len(det_bbox) for det_bbox in det_bboxes) grid_pred = { k: v.split(num_roi_per_img, 0) for k, v in grid_pred.items() } # apply bbox post-processing to each image individually bbox_results = [] num_imgs = len(det_bboxes) for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: bbox_results.append([ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head.num_classes) ]) else: det_bbox = self.grid_head.get_bboxes( det_bboxes[i], grid_pred['fused'][i], [img_metas[i]]) if rescale: det_bbox[:, :4] /= img_metas[i]['scale_factor'] bbox_results.append( bbox2result(det_bbox, det_labels[i], self.bbox_head.num_classes)) else: bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head.num_classes) ] for _ in range(len(det_bboxes))] if not self.with_mask: return bbox_results else: segm_results = self.simple_test_mask( x, img_metas, det_bboxes, det_labels, rescale=rescale) return list(zip(bbox_results, segm_results))
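# --- Worked sketch (added for this dump, not part of the upstream file) ---
# _random_jitter perturbs positive proposals in center/size space. With a
# fixed offset of +0.1 (instead of a random one) a 40x40 box centred at
# (40, 40) is shifted by 10% of its size and widened by 10%:
if __name__ == '__main__':
    bbox = torch.tensor([[20., 20., 60., 60.]])
    offsets = torch.tensor([[0.1, 0.1, 0.1, 0.1]])  # (dx, dy, dw, dh) ratios
    cxcy = (bbox[:, 2:4] + bbox[:, :2]) / 2
    wh = (bbox[:, 2:4] - bbox[:, :2]).abs()
    new_cxcy = cxcy + wh * offsets[:, :2]
    new_wh = wh * (1 + offsets[:, 2:])
    new_bbox = torch.cat([new_cxcy - new_wh / 2, new_cxcy + new_wh / 2], dim=1)
    assert torch.allclose(new_bbox, torch.tensor([[22., 22., 66., 66.]]))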
6,961
39.71345
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/scnet_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn.functional as F from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms) from ..builder import HEADS, build_head, build_roi_extractor from ..utils.brick_wrappers import adaptive_avg_pool2d from .cascade_roi_head import CascadeRoIHead @HEADS.register_module() class SCNetRoIHead(CascadeRoIHead): """RoIHead for `SCNet <https://arxiv.org/abs/2012.10150>`_. Args: num_stages (int): number of cascade stages. stage_loss_weights (list): loss weight of cascade stages. semantic_roi_extractor (dict): config to init semantic roi extractor. semantic_head (dict): config to init semantic head. feat_relay_head (dict): config to init feature_relay_head. glbctx_head (dict): config to init global context head. """ def __init__(self, num_stages, stage_loss_weights, semantic_roi_extractor=None, semantic_head=None, feat_relay_head=None, glbctx_head=None, **kwargs): super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights, **kwargs) assert self.with_bbox and self.with_mask assert not self.with_shared_head # shared head is not supported if semantic_head is not None: self.semantic_roi_extractor = build_roi_extractor( semantic_roi_extractor) self.semantic_head = build_head(semantic_head) if feat_relay_head is not None: self.feat_relay_head = build_head(feat_relay_head) if glbctx_head is not None: self.glbctx_head = build_head(glbctx_head) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize ``mask_head``""" if mask_roi_extractor is not None: self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) self.mask_head = build_head(mask_head) @property def with_semantic(self): """bool: whether the head has semantic head""" return hasattr(self, 'semantic_head') and self.semantic_head is not None @property def with_feat_relay(self): """bool: whether the head has feature relay head""" return (hasattr(self, 'feat_relay_head') and self.feat_relay_head is not None) @property def with_glbctx(self): """bool: whether the head has global context head""" return hasattr(self, 'glbctx_head') and self.glbctx_head is not None def _fuse_glbctx(self, roi_feats, glbctx_feat, rois): """Fuse global context feats with roi feats.""" assert roi_feats.size(0) == rois.size(0) img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long() fused_feats = torch.zeros_like(roi_feats) for img_id in img_inds: inds = (rois[:, 0] == img_id.item()) fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id] return fused_feats def _slice_pos_feats(self, feats, sampling_results): """Get features from pos rois.""" num_rois = [res.bboxes.size(0) for res in sampling_results] num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results] inds = torch.zeros(sum(num_rois), dtype=torch.bool) start = 0 for i in range(len(num_rois)): start = 0 if i == 0 else start + num_rois[i - 1] stop = start + num_pos_rois[i] inds[start:stop] = 1 sliced_feats = feats[inds] return sliced_feats def _bbox_forward(self, stage, x, rois, semantic_feat=None, glbctx_feat=None): """Box head forward function used in both training and testing.""" bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor( x[:len(bbox_roi_extractor.featmap_strides)], rois) if self.with_semantic and semantic_feat is not None: bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: 
bbox_semantic_feat = adaptive_avg_pool2d( bbox_semantic_feat, bbox_feats.shape[-2:]) bbox_feats += bbox_semantic_feat if self.with_glbctx and glbctx_feat is not None: bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois) cls_score, bbox_pred, relayed_feat = bbox_head( bbox_feats, return_shared_feat=True) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, relayed_feat=relayed_feat) return bbox_results def _mask_forward(self, x, rois, semantic_feat=None, glbctx_feat=None, relayed_feat=None): """Mask head forward function used in both training and testing.""" mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], rois) if self.with_semantic and semantic_feat is not None: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat if self.with_glbctx and glbctx_feat is not None: mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois) if self.with_feat_relay and relayed_feat is not None: mask_feats = mask_feats + relayed_feat mask_pred = self.mask_head(mask_feats) mask_results = dict(mask_pred=mask_pred) return mask_results def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, semantic_feat=None, glbctx_feat=None): """Run forward function and calculate loss for box head in training.""" bbox_head = self.bbox_head[stage] rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward( stage, x, rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) loss_bbox = bbox_head.loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update( loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) return bbox_results def _mask_forward_train(self, x, sampling_results, gt_masks, rcnn_train_cfg, semantic_feat=None, glbctx_feat=None, relayed_feat=None): """Run forward function and calculate loss for mask head in training.""" pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_results = self._mask_forward( x, pos_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head.loss(mask_results['mask_pred'], mask_targets, pos_labels) mask_results = loss_mask return mask_results def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, gt_semantic_seg=None): """ Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposal_list (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None, list[Tensor]): specify which bounding boxes can be ignored when computing the loss. 
gt_masks (None, Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. gt_semantic_seg (None, list[Tensor]): semantic segmentation masks used if the architecture supports semantic segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components """ losses = dict() # semantic segmentation branch if self.with_semantic: semantic_pred, semantic_feat = self.semantic_head(x) loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) losses['loss_semantic_seg'] = loss_seg else: semantic_feat = None # global context branch if self.with_glbctx: mc_pred, glbctx_feat = self.glbctx_head(x) loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels) losses['loss_glbctx'] = loss_glbctx else: glbctx_feat = None for i in range(self.num_stages): self.current_stage = i rcnn_train_cfg = self.train_cfg[i] lw = self.stage_loss_weights[i] # assign gts and sample proposals sampling_results = [] bbox_assigner = self.bbox_assigner[i] bbox_sampler = self.bbox_sampler[i] num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] for j in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) bbox_results = \ self._bbox_forward_train( i, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, semantic_feat, glbctx_feat) roi_labels = bbox_results['bbox_targets'][0] for name, value in bbox_results['loss_bbox'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # refine boxes if i < self.num_stages - 1: pos_is_gts = [res.pos_is_gt for res in sampling_results] with torch.no_grad(): proposal_list = self.bbox_head[i].refine_bboxes( bbox_results['rois'], roi_labels, bbox_results['bbox_pred'], pos_is_gts, img_metas) if self.with_feat_relay: relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'], sampling_results) relayed_feat = self.feat_relay_head(relayed_feat) else: relayed_feat = None mask_results = self._mask_forward_train(x, sampling_results, gt_masks, rcnn_train_cfg, semantic_feat, glbctx_feat, relayed_feat) mask_lw = sum(self.stage_loss_weights) losses['loss_mask'] = mask_lw * mask_results['loss_mask'] return losses def simple_test(self, x, proposal_list, img_metas, rescale=False): """Test without augmentation. Args: x (tuple[Tensor]): Features from upstream network. Each has shape (batch_size, c, h, w). proposal_list (list(Tensor)): Proposals from rpn head. Each has shape (num_proposals, 5), last dimension 5 represent (x1, y1, x2, y2, score). img_metas (list[dict]): Meta information of images. rescale (bool): Whether to rescale the results to the original image. Default: True. Returns: list[list[np.ndarray]] or list[tuple]: When no mask branch, it is bbox results of each image and classes with type `list[list[np.ndarray]]`. The outer list corresponds to each image. The inner list corresponds to each class. When the model has mask branch, it contains bbox results and mask results. The outer list corresponds to each image, and first element of tuple is bbox results, second element is mask results. 
""" if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None if self.with_glbctx: mc_pred, glbctx_feat = self.glbctx_head(x) else: glbctx_feat = None num_imgs = len(proposal_list) img_shapes = tuple(meta['img_shape'] for meta in img_metas) ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # "ms" in variable names means multi-stage ms_scores = [] rcnn_test_cfg = self.test_cfg rois = bbox2roi(proposal_list) if rois.shape[0] == 0: # There is no proposal in the whole batch bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head[-1].num_classes) ]] * num_imgs if self.with_mask: mask_classes = self.mask_head.num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] results = list(zip(bbox_results, segm_results)) else: results = bbox_results return results for i in range(self.num_stages): bbox_head = self.bbox_head[i] bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple(len(p) for p in proposal_list) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) bbox_pred = bbox_pred.split(num_proposals_per_img, 0) ms_scores.append(cls_score) if i < self.num_stages - 1: refine_rois_list = [] for j in range(num_imgs): if rois[j].shape[0] > 0: bbox_label = cls_score[j][:, :-1].argmax(dim=1) refine_rois = bbox_head.regress_by_class( rois[j], bbox_label, bbox_pred[j], img_metas[j]) refine_rois_list.append(refine_rois) rois = torch.cat(refine_rois_list) # average scores of each image by stages cls_score = [ sum([score[i] for score in ms_scores]) / float(len(ms_scores)) for i in range(num_imgs) ] # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(num_imgs): det_bbox, det_label = self.bbox_head[-1].get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) det_bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head[-1].num_classes) for i in range(num_imgs) ] if self.with_mask: if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): mask_classes = self.mask_head.num_classes det_segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] else: if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i] for i in range(num_imgs) ] mask_rois = bbox2roi(_bboxes) # get relay feature on mask_rois bbox_results = self._bbox_forward( -1, x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) relayed_feat = bbox_results['relayed_feat'] relayed_feat = self.feat_relay_head(relayed_feat) mask_results = self._mask_forward( x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_pred = mask_results['mask_pred'] # split batch mask prediction back to each image num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) mask_preds = mask_pred.split(num_bbox_per_img, 0) # apply mask post-processing to each image individually det_segm_results = [] for i in range(num_imgs): if 
det_bboxes[i].shape[0] == 0: det_segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) else: segm_result = self.mask_head.get_seg_masks( mask_preds[i], _bboxes[i], det_labels[i], self.test_cfg, ori_shapes[i], scale_factors[i], rescale) det_segm_results.append(segm_result) # return results if self.with_mask: return list(zip(det_bbox_results, det_segm_results)) else: return det_bbox_results def aug_test(self, img_feats, proposal_list, img_metas, rescale=False): if self.with_semantic: semantic_feats = [ self.semantic_head(feat)[1] for feat in img_feats ] else: semantic_feats = [None] * len(img_metas) if self.with_glbctx: glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats] else: glbctx_feats = [None] * len(img_metas) rcnn_test_cfg = self.test_cfg aug_bboxes = [] aug_scores = [] for x, img_meta, semantic_feat, glbctx_feat in zip( img_feats, img_metas, semantic_feats, glbctx_feats): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip) # "ms" in variable names means multi-stage ms_scores = [] rois = bbox2roi([proposals]) if rois.shape[0] == 0: # There is no proposal in the single image aug_bboxes.append(rois.new_zeros(0, 4)) aug_scores.append(rois.new_zeros(0, 1)) continue for i in range(self.num_stages): bbox_head = self.bbox_head[i] bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) ms_scores.append(bbox_results['cls_score']) if i < self.num_stages - 1: bbox_label = bbox_results['cls_score'].argmax(dim=1) rois = bbox_head.regress_by_class( rois, bbox_label, bbox_results['bbox_pred'], img_meta[0]) cls_score = sum(ms_scores) / float(len(ms_scores)) bboxes, scores = self.bbox_head[-1].get_bboxes( rois, cls_score, bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) det_bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) if self.with_mask: if det_bboxes.shape[0] == 0: det_segm_results = [[] for _ in range(self.mask_head.num_classes)] else: aug_masks = [] for x, img_meta, semantic_feat, glbctx_feat in zip( img_feats, img_metas, semantic_feats, glbctx_feats): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip) mask_rois = bbox2roi([_bboxes]) # get relay feature on mask_rois bbox_results = self._bbox_forward( -1, x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat) relayed_feat = bbox_results['relayed_feat'] relayed_feat = self.feat_relay_head(relayed_feat) mask_results = self._mask_forward( x, mask_rois, semantic_feat=semantic_feat, glbctx_feat=glbctx_feat, relayed_feat=relayed_feat) mask_pred = mask_results['mask_pred'] aug_masks.append(mask_pred.sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] det_segm_results = self.mask_head.get_seg_masks( merged_masks, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor=1.0, 
                    rescale=False)
            return [(det_bbox_results, det_segm_results)]
        else:
            return [det_bbox_results]
25,683
41.382838
79
py
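The cascade-style RoI head above collects per-stage classification scores in ms_scores during testing and averages them per image before box post-processing. Below is a minimal, self-contained sketch of just that averaging step (plain PyTorch with toy shapes; the names and sizes are illustrative, not the mmdet API):

import torch

num_stages, num_imgs, num_props, num_classes = 3, 2, 5, 81

# one (num_props, num_classes) score tensor per image and per stage,
# mirroring the per-image split done inside simple_test
ms_scores = [[torch.rand(num_props, num_classes) for _ in range(num_imgs)]
             for _ in range(num_stages)]

# average the scores of each image over all stages
cls_score = [
    sum(stage_scores[i] for stage_scores in ms_scores) / float(len(ms_scores))
    for i in range(num_imgs)
]
print(cls_score[0].shape)  # torch.Size([5, 81])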
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/double_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import HEADS from .standard_roi_head import StandardRoIHead @HEADS.register_module() class DoubleHeadRoIHead(StandardRoIHead): """RoI head for Double Head RCNN. https://arxiv.org/abs/1904.06493 """ def __init__(self, reg_roi_scale_factor, **kwargs): super(DoubleHeadRoIHead, self).__init__(**kwargs) self.reg_roi_scale_factor = reg_roi_scale_factor def _bbox_forward(self, x, rois): """Box head forward function used in both training and testing time.""" bbox_cls_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) bbox_reg_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor) if self.with_shared_head: bbox_cls_feats = self.shared_head(bbox_cls_feats) bbox_reg_feats = self.shared_head(bbox_reg_feats) cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_cls_feats) return bbox_results
1,250
34.742857
79
py
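DoubleHeadRoIHead pools RoI features twice, once for classification and once with the RoIs enlarged by reg_roi_scale_factor for regression. The enlargement itself happens inside the RoI extractor (via its roi_scale_factor argument); the sketch below only illustrates the underlying geometry of scaling a box about its center (pure PyTorch, toy values, hypothetical helper name):

import torch

def scale_rois(rois, scale_factor):
    # rois: (N, 5) tensor in (batch_idx, x1, y1, x2, y2) format
    cx = (rois[:, 1] + rois[:, 3]) * 0.5
    cy = (rois[:, 2] + rois[:, 4]) * 0.5
    w = (rois[:, 3] - rois[:, 1]) * scale_factor
    h = (rois[:, 4] - rois[:, 2]) * scale_factor
    scaled = rois.clone()
    scaled[:, 1] = cx - w * 0.5
    scaled[:, 2] = cy - h * 0.5
    scaled[:, 3] = cx + w * 0.5
    scaled[:, 4] = cy + h * 0.5
    return scaled

rois = torch.tensor([[0., 10., 10., 30., 50.]])
print(scale_rois(rois, 1.3))  # same center, 1.3x wider and taller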
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/sparse_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh from mmdet.core.bbox.samplers import PseudoSampler from ..builder import HEADS from .cascade_roi_head import CascadeRoIHead @HEADS.register_module() class SparseRoIHead(CascadeRoIHead): r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_ and `Instances as Queries <http://arxiv.org/abs/2105.01928>`_ Args: num_stages (int): Number of stage whole iterative process. Defaults to 6. stage_loss_weights (Tuple[float]): The loss weight of each stage. By default all stages have the same weight 1. bbox_roi_extractor (dict): Config of box roi extractor. mask_roi_extractor (dict): Config of mask roi extractor. bbox_head (dict): Config of box head. mask_head (dict): Config of mask head. train_cfg (dict, optional): Configuration information in train stage. Defaults to None. test_cfg (dict, optional): Configuration information in test stage. Defaults to None. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, num_stages=6, stage_loss_weights=(1, 1, 1, 1, 1, 1), proposal_feature_channel=256, bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict( type='RoIAlign', output_size=7, sampling_ratio=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_roi_extractor=None, bbox_head=dict( type='DIIHead', num_classes=80, num_fcs=2, num_heads=8, num_cls_fcs=1, num_reg_fcs=3, feedforward_channels=2048, hidden_channels=256, dropout=0.0, roi_feat_size=7, ffn_act_cfg=dict(type='ReLU', inplace=True)), mask_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): assert bbox_roi_extractor is not None assert bbox_head is not None assert len(stage_loss_weights) == num_stages self.num_stages = num_stages self.stage_loss_weights = stage_loss_weights self.proposal_feature_channel = proposal_feature_channel super(SparseRoIHead, self).__init__( num_stages, stage_loss_weights, bbox_roi_extractor=bbox_roi_extractor, mask_roi_extractor=mask_roi_extractor, bbox_head=bbox_head, mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) # train_cfg would be None when run the test.py if train_cfg is not None: for stage in range(num_stages): assert isinstance(self.bbox_sampler[stage], PseudoSampler), \ 'Sparse R-CNN and QueryInst only support `PseudoSampler`' def _bbox_forward(self, stage, x, rois, object_feats, img_metas): """Box head forward function used in both training and testing. Returns all regression, classification results and a intermediate feature. Args: stage (int): The index of current stage in iterative process. x (List[Tensor]): List of FPN features rois (Tensor): Rois in total batch. With shape (num_proposal, 5). the last dimension 5 represents (img_index, x1, y1, x2, y2). object_feats (Tensor): The object feature extracted from the previous stage. img_metas (dict): meta information of images. Returns: dict[str, Tensor]: a dictionary of bbox head outputs, Containing the following results: - cls_score (Tensor): The score of each class, has shape (batch_size, num_proposals, num_classes) when use focal loss or (batch_size, num_proposals, num_classes+1) otherwise. - decode_bbox_pred (Tensor): The regression results with shape (batch_size, num_proposal, 4). The last dimension 4 represents [tl_x, tl_y, br_x, br_y]. 
- object_feats (Tensor): The object feature extracted from current stage - detach_cls_score_list (list[Tensor]): The detached classification results, length is batch_size, and each tensor has shape (num_proposal, num_classes). - detach_proposal_list (list[tensor]): The detached regression results, length is batch_size, and each tensor has shape (num_proposal, 4). The last dimension 4 represents [tl_x, tl_y, br_x, br_y]. """ num_imgs = len(img_metas) bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) cls_score, bbox_pred, object_feats, attn_feats = bbox_head( bbox_feats, object_feats) proposal_list = self.bbox_head[stage].refine_bboxes( rois, rois.new_zeros(len(rois)), # dummy arg bbox_pred.view(-1, bbox_pred.size(-1)), [rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)], img_metas) bbox_results = dict( cls_score=cls_score, decode_bbox_pred=torch.cat(proposal_list), object_feats=object_feats, attn_feats=attn_feats, # detach then use it in label assign detach_cls_score_list=[ cls_score[i].detach() for i in range(num_imgs) ], detach_proposal_list=[item.detach() for item in proposal_list]) return bbox_results def _mask_forward(self, stage, x, rois, attn_feats): """Mask head forward function used in both training and testing.""" mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], rois) # do not support caffe_c4 model anymore mask_pred = mask_head(mask_feats, attn_feats) mask_results = dict(mask_pred=mask_pred) return mask_results def _mask_forward_train(self, stage, x, attn_feats, sampling_results, gt_masks, rcnn_train_cfg): """Run forward function and calculate loss for mask head in training.""" pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) attn_feats = torch.cat([ feats[res.pos_inds] for (feats, res) in zip(attn_feats, sampling_results) ]) mask_results = self._mask_forward(stage, x, pos_rois, attn_feats) mask_targets = self.mask_head[stage].get_targets( sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'], mask_targets, pos_labels) mask_results.update(loss_mask) return mask_results def forward_train(self, x, proposal_boxes, proposal_features, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, imgs_whwh=None, gt_masks=None): """Forward function in training stage. Args: x (list[Tensor]): list of multi-level img features. proposals (Tensor): Decoded proposal bboxes, has shape (batch_size, num_proposals, 4) proposal_features (Tensor): Expanded proposal features, has shape (batch_size, num_proposals, proposal_feature_channel) img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. imgs_whwh (Tensor): Tensor with shape (batch_size, 4), the dimension means [img_width,img_height, img_width, img_height]. 
gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components of all stage. """ num_imgs = len(img_metas) num_proposals = proposal_boxes.size(1) imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1) all_stage_bbox_results = [] proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))] object_feats = proposal_features all_stage_loss = {} for stage in range(self.num_stages): rois = bbox2roi(proposal_list) bbox_results = self._bbox_forward(stage, x, rois, object_feats, img_metas) all_stage_bbox_results.append(bbox_results) if gt_bboxes_ignore is None: # TODO support ignore gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] cls_pred_list = bbox_results['detach_cls_score_list'] proposal_list = bbox_results['detach_proposal_list'] for i in range(num_imgs): normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] / imgs_whwh[i]) assign_result = self.bbox_assigner[stage].assign( normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i], gt_labels[i], img_metas[i]) sampling_result = self.bbox_sampler[stage].sample( assign_result, proposal_list[i], gt_bboxes[i]) sampling_results.append(sampling_result) bbox_targets = self.bbox_head[stage].get_targets( sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage], True) cls_score = bbox_results['cls_score'] decode_bbox_pred = bbox_results['decode_bbox_pred'] single_stage_loss = self.bbox_head[stage].loss( cls_score.view(-1, cls_score.size(-1)), decode_bbox_pred.view(-1, 4), *bbox_targets, imgs_whwh=imgs_whwh) if self.with_mask: mask_results = self._mask_forward_train( stage, x, bbox_results['attn_feats'], sampling_results, gt_masks, self.train_cfg[stage]) single_stage_loss['loss_mask'] = mask_results['loss_mask'] for key, value in single_stage_loss.items(): all_stage_loss[f'stage{stage}_{key}'] = value * \ self.stage_loss_weights[stage] object_feats = bbox_results['object_feats'] return all_stage_loss def simple_test(self, x, proposal_boxes, proposal_features, img_metas, imgs_whwh, rescale=False): """Test without augmentation. Args: x (list[Tensor]): list of multi-level img features. proposal_boxes (Tensor): Decoded proposal bboxes, has shape (batch_size, num_proposals, 4) proposal_features (Tensor): Expanded proposal features, has shape (batch_size, num_proposals, proposal_feature_channel) img_metas (dict): meta information of images. imgs_whwh (Tensor): Tensor with shape (batch_size, 4), the dimension means [img_width,img_height, img_width, img_height]. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[list[np.ndarray]] or list[tuple]: When no mask branch, it is bbox results of each image and classes with type `list[list[np.ndarray]]`. The outer list corresponds to each image. The inner list corresponds to each class. When the model has a mask branch, it is a list[tuple] that contains bbox results and mask results. The outer list corresponds to each image, and first element of tuple is bbox results, second element is mask results. """ assert self.with_bbox, 'Bbox head must be implemented.' 
# Decode initial proposals num_imgs = len(img_metas) proposal_list = [proposal_boxes[i] for i in range(num_imgs)] ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) object_feats = proposal_features if all([proposal.shape[0] == 0 for proposal in proposal_list]): # There is no proposal in the whole batch bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for i in range(self.bbox_head[-1].num_classes) ]] * num_imgs return bbox_results for stage in range(self.num_stages): rois = bbox2roi(proposal_list) bbox_results = self._bbox_forward(stage, x, rois, object_feats, img_metas) object_feats = bbox_results['object_feats'] cls_score = bbox_results['cls_score'] proposal_list = bbox_results['detach_proposal_list'] if self.with_mask: rois = bbox2roi(proposal_list) mask_results = self._mask_forward(stage, x, rois, bbox_results['attn_feats']) mask_results['mask_pred'] = mask_results['mask_pred'].reshape( num_imgs, -1, *mask_results['mask_pred'].size()[1:]) num_classes = self.bbox_head[-1].num_classes det_bboxes = [] det_labels = [] if self.bbox_head[-1].loss_cls.use_sigmoid: cls_score = cls_score.sigmoid() else: cls_score = cls_score.softmax(-1)[..., :-1] for img_id in range(num_imgs): cls_score_per_img = cls_score[img_id] scores_per_img, topk_indices = cls_score_per_img.flatten( 0, 1).topk( self.test_cfg.max_per_img, sorted=False) labels_per_img = topk_indices % num_classes bbox_pred_per_img = proposal_list[img_id][topk_indices // num_classes] if rescale: scale_factor = img_metas[img_id]['scale_factor'] bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor) det_bboxes.append( torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1)) det_labels.append(labels_per_img) bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], num_classes) for i in range(num_imgs) ] if self.with_mask: if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] segm_results = [] mask_pred = mask_results['mask_pred'] for img_id in range(num_imgs): mask_pred_per_img = mask_pred[img_id].flatten(0, 1)[topk_indices] mask_pred_per_img = mask_pred_per_img[:, None, ...].repeat( 1, num_classes, 1, 1) segm_result = self.mask_head[-1].get_seg_masks( mask_pred_per_img, _bboxes[img_id], det_labels[img_id], self.test_cfg, ori_shapes[img_id], scale_factors[img_id], rescale) segm_results.append(segm_result) if self.with_mask: results = list(zip(bbox_results, segm_results)) else: results = bbox_results return results def aug_test(self, features, proposal_list, img_metas, rescale=False): raise NotImplementedError( 'Sparse R-CNN and QueryInst does not support `aug_test`') def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas): """Dummy forward function when do the flops computing.""" all_stage_bbox_results = [] proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))] object_feats = proposal_features if self.with_bbox: for stage in range(self.num_stages): rois = bbox2roi(proposal_list) bbox_results = self._bbox_forward(stage, x, rois, object_feats, img_metas) all_stage_bbox_results.append((bbox_results, )) proposal_list = bbox_results['detach_proposal_list'] object_feats = bbox_results['object_feats'] if self.with_mask: rois = bbox2roi(proposal_list) mask_results = self._mask_forward( stage, x, 
                        rois, bbox_results['attn_feats'])
                    all_stage_bbox_results[-1] += (mask_results, )
        return all_stage_bbox_results
19,280
44.367059
79
py
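Before label assignment, SparseRoIHead divides each decoded proposal by the per-image imgs_whwh tensor and converts it from (x1, y1, x2, y2) to (cx, cy, w, h). A small sketch of that normalization (illustrative only; mmdet ships bbox_xyxy_to_cxcywh for the conversion):

import torch

def xyxy_to_cxcywh(bbox):
    x1, y1, x2, y2 = bbox.unbind(-1)
    return torch.stack(
        [(x1 + x2) * 0.5, (y1 + y2) * 0.5, x2 - x1, y2 - y1], dim=-1)

proposals = torch.tensor([[100., 200., 300., 400.]])   # xyxy in pixels
img_whwh = torch.tensor([800., 600., 800., 600.])      # (w, h, w, h)

normalized_ccwh = xyxy_to_cxcywh(proposals / img_whwh)
print(normalized_ccwh)  # values in [0, 1], as the assigner expects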
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/cascade_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn as nn from mmcv.runner import ModuleList from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner, build_sampler, merge_aug_bboxes, merge_aug_masks, multiclass_nms) from ..builder import HEADS, build_head, build_roi_extractor from .base_roi_head import BaseRoIHead from .test_mixins import BBoxTestMixin, MaskTestMixin @HEADS.register_module() class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): """Cascade roi head including one bbox head and one mask head. https://arxiv.org/abs/1712.00726 """ def __init__(self, num_stages, stage_loss_weights, bbox_roi_extractor=None, bbox_head=None, mask_roi_extractor=None, mask_head=None, shared_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): assert bbox_roi_extractor is not None assert bbox_head is not None assert shared_head is None, \ 'Shared head is not supported in Cascade RCNN anymore' self.num_stages = num_stages self.stage_loss_weights = stage_loss_weights super(CascadeRoIHead, self).__init__( bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, mask_roi_extractor=mask_roi_extractor, mask_head=mask_head, shared_head=shared_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) def init_bbox_head(self, bbox_roi_extractor, bbox_head): """Initialize box head and box roi extractor. Args: bbox_roi_extractor (dict): Config of box roi extractor. bbox_head (dict): Config of box in box head. """ self.bbox_roi_extractor = ModuleList() self.bbox_head = ModuleList() if not isinstance(bbox_roi_extractor, list): bbox_roi_extractor = [ bbox_roi_extractor for _ in range(self.num_stages) ] if not isinstance(bbox_head, list): bbox_head = [bbox_head for _ in range(self.num_stages)] assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor)) self.bbox_head.append(build_head(head)) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize mask head and mask roi extractor. Args: mask_roi_extractor (dict): Config of mask roi extractor. mask_head (dict): Config of mask in mask head. 
""" self.mask_head = nn.ModuleList() if not isinstance(mask_head, list): mask_head = [mask_head for _ in range(self.num_stages)] assert len(mask_head) == self.num_stages for head in mask_head: self.mask_head.append(build_head(head)) if mask_roi_extractor is not None: self.share_roi_extractor = False self.mask_roi_extractor = ModuleList() if not isinstance(mask_roi_extractor, list): mask_roi_extractor = [ mask_roi_extractor for _ in range(self.num_stages) ] assert len(mask_roi_extractor) == self.num_stages for roi_extractor in mask_roi_extractor: self.mask_roi_extractor.append( build_roi_extractor(roi_extractor)) else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor def init_assigner_sampler(self): """Initialize assigner and sampler for each stage.""" self.bbox_assigner = [] self.bbox_sampler = [] if self.train_cfg is not None: for idx, rcnn_train_cfg in enumerate(self.train_cfg): self.bbox_assigner.append( build_assigner(rcnn_train_cfg.assigner)) self.current_stage = idx self.bbox_sampler.append( build_sampler(rcnn_train_cfg.sampler, context=self)) def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # mask heads if self.with_mask: mask_rois = rois[:100] for i in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) outs = outs + (mask_results['mask_pred'], ) return outs def _bbox_forward(self, stage, x, rois): """Box head forward function used in both training and testing.""" bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) # do not support caffe_c4 model anymore cls_score, bbox_pred = bbox_head(bbox_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) return bbox_results def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg): """Run forward function and calculate loss for box head in training.""" rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward(stage, x, rois) bbox_targets = self.bbox_head[stage].get_targets( sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update( loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) return bbox_results def _mask_forward(self, stage, x, rois): """Mask head forward function used in both training and testing.""" mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], rois) # do not support caffe_c4 model anymore mask_pred = mask_head(mask_feats) mask_results = dict(mask_pred=mask_pred) return mask_results def _mask_forward_train(self, stage, x, sampling_results, gt_masks, rcnn_train_cfg, bbox_feats=None): """Run forward function and calculate loss for mask head in training.""" pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_results = self._mask_forward(stage, x, pos_rois) mask_targets = self.mask_head[stage].get_targets( sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = 
self.mask_head[stage].loss(mask_results['mask_pred'], mask_targets, pos_labels) mask_results.update(loss_mask=loss_mask) return mask_results def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): """ Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposals (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components """ losses = dict() for i in range(self.num_stages): self.current_stage = i rcnn_train_cfg = self.train_cfg[i] lw = self.stage_loss_weights[i] # assign gts and sample proposals sampling_results = [] if self.with_bbox or self.with_mask: bbox_assigner = self.bbox_assigner[i] bbox_sampler = self.bbox_sampler[i] num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] for j in range(num_imgs): assign_result = bbox_assigner.assign( proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss bbox_results = self._bbox_forward_train(i, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) for name, value in bbox_results['loss_bbox'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # mask head forward and loss if self.with_mask: mask_results = self._mask_forward_train( i, x, sampling_results, gt_masks, rcnn_train_cfg, bbox_results['bbox_feats']) for name, value in mask_results['loss_mask'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # refine bboxes if i < self.num_stages - 1: pos_is_gts = [res.pos_is_gt for res in sampling_results] # bbox_targets is a tuple roi_labels = bbox_results['bbox_targets'][0] with torch.no_grad(): cls_score = bbox_results['cls_score'] if self.bbox_head[i].custom_activation: cls_score = self.bbox_head[i].loss_cls.get_activation( cls_score) # Empty proposal. if cls_score.numel() == 0: break roi_labels = torch.where( roi_labels == self.bbox_head[i].num_classes, cls_score[:, :-1].argmax(1), roi_labels) proposal_list = self.bbox_head[i].refine_bboxes( bbox_results['rois'], roi_labels, bbox_results['bbox_pred'], pos_is_gts, img_metas) return losses def simple_test(self, x, proposal_list, img_metas, rescale=False): """Test without augmentation. Args: x (tuple[Tensor]): Features from upstream network. Each has shape (batch_size, c, h, w). proposal_list (list(Tensor)): Proposals from rpn head. Each has shape (num_proposals, 5), last dimension 5 represent (x1, y1, x2, y2, score). img_metas (list[dict]): Meta information of images. rescale (bool): Whether to rescale the results to the original image. Default: True. 
Returns: list[list[np.ndarray]] or list[tuple]: When no mask branch, it is bbox results of each image and classes with type `list[list[np.ndarray]]`. The outer list corresponds to each image. The inner list corresponds to each class. When the model has mask branch, it contains bbox results and mask results. The outer list corresponds to each image, and first element of tuple is bbox results, second element is mask results. """ assert self.with_bbox, 'Bbox head must be implemented.' num_imgs = len(proposal_list) img_shapes = tuple(meta['img_shape'] for meta in img_metas) ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # "ms" in variable names means multi-stage ms_bbox_result = {} ms_segm_result = {} ms_scores = [] rcnn_test_cfg = self.test_cfg rois = bbox2roi(proposal_list) if rois.shape[0] == 0: # There is no proposal in the whole batch bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head[-1].num_classes) ]] * num_imgs if self.with_mask: mask_classes = self.mask_head[-1].num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] results = list(zip(bbox_results, segm_results)) else: results = bbox_results return results for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple( len(proposals) for proposals in proposal_list) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) if isinstance(bbox_pred, torch.Tensor): bbox_pred = bbox_pred.split(num_proposals_per_img, 0) else: bbox_pred = self.bbox_head[i].bbox_pred_split( bbox_pred, num_proposals_per_img) ms_scores.append(cls_score) if i < self.num_stages - 1: if self.bbox_head[i].custom_activation: cls_score = [ self.bbox_head[i].loss_cls.get_activation(s) for s in cls_score ] refine_rois_list = [] for j in range(num_imgs): if rois[j].shape[0] > 0: bbox_label = cls_score[j][:, :-1].argmax(dim=1) refined_rois = self.bbox_head[i].regress_by_class( rois[j], bbox_label, bbox_pred[j], img_metas[j]) refine_rois_list.append(refined_rois) rois = torch.cat(refine_rois_list) # average scores of each image by stages cls_score = [ sum([score[i] for score in ms_scores]) / float(len(ms_scores)) for i in range(num_imgs) ] # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(num_imgs): det_bbox, det_label = self.bbox_head[-1].get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head[-1].num_classes) for i in range(num_imgs) ] ms_bbox_result['ensemble'] = bbox_results if self.with_mask: if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): mask_classes = self.mask_head[-1].num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] else: if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] mask_rois = bbox2roi(_bboxes) num_mask_rois_per_img = tuple( _bbox.size(0) for _bbox in _bboxes) aug_masks = [] for i 
in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) mask_pred = mask_results['mask_pred'] # split batch mask prediction back to each image mask_pred = mask_pred.split(num_mask_rois_per_img, 0) aug_masks.append([ m.sigmoid().cpu().detach().numpy() for m in mask_pred ]) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head[-1].num_classes)]) else: aug_mask = [mask[i] for mask in aug_masks] merged_masks = merge_aug_masks( aug_mask, [[img_metas[i]]] * self.num_stages, rcnn_test_cfg) segm_result = self.mask_head[-1].get_seg_masks( merged_masks, _bboxes[i], det_labels[i], rcnn_test_cfg, ori_shapes[i], scale_factors[i], rescale) segm_results.append(segm_result) ms_segm_result['ensemble'] = segm_results if self.with_mask: results = list( zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble'])) else: results = ms_bbox_result['ensemble'] return results def aug_test(self, features, proposal_list, img_metas, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ rcnn_test_cfg = self.test_cfg aug_bboxes = [] aug_scores = [] for x, img_meta in zip(features, img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip, flip_direction) # "ms" in variable names means multi-stage ms_scores = [] rois = bbox2roi([proposals]) if rois.shape[0] == 0: # There is no proposal in the single image aug_bboxes.append(rois.new_zeros(0, 4)) aug_scores.append(rois.new_zeros(0, 1)) continue for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) ms_scores.append(bbox_results['cls_score']) if i < self.num_stages - 1: cls_score = bbox_results['cls_score'] if self.bbox_head[i].custom_activation: cls_score = self.bbox_head[i].loss_cls.get_activation( cls_score) bbox_label = cls_score[:, :-1].argmax(dim=1) rois = self.bbox_head[i].regress_by_class( rois, bbox_label, bbox_results['bbox_pred'], img_meta[0]) cls_score = sum(ms_scores) / float(len(ms_scores)) bboxes, scores = self.bbox_head[-1].get_bboxes( rois, cls_score, bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) if self.with_mask: if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head[-1].num_classes)] else: aug_masks = [] aug_img_metas = [] for x, img_meta in zip(features, img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip, flip_direction) mask_rois = bbox2roi([_bboxes]) for i in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) aug_masks.append( mask_results['mask_pred'].sigmoid().cpu().numpy()) 
aug_img_metas.append(img_meta) merged_masks = merge_aug_masks(aug_masks, aug_img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] dummy_scale_factor = np.ones(4) segm_result = self.mask_head[-1].get_seg_masks( merged_masks, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor=dummy_scale_factor, rescale=False) return [(bbox_result, segm_result)] else: return [bbox_result] def onnx_export(self, x, proposals, img_metas): assert self.with_bbox, 'Bbox head must be implemented.' assert proposals.shape[0] == 1, 'Only support one input image ' \ 'while in exporting to ONNX' # remove the scores rois = proposals[..., :-1] batch_size = rois.shape[0] num_proposals_per_img = rois.shape[1] # Eliminate the batch dimension rois = rois.view(-1, 4) # add dummy batch index rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1) max_shape = img_metas[0]['img_shape_for_onnx'] ms_scores = [] rcnn_test_cfg = self.test_cfg for i in range(self.num_stages): bbox_results = self._bbox_forward(i, x, rois) cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] # Recover the batch dimension rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) cls_score = cls_score.reshape(batch_size, num_proposals_per_img, cls_score.size(-1)) bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4) ms_scores.append(cls_score) if i < self.num_stages - 1: assert self.bbox_head[i].reg_class_agnostic new_rois = self.bbox_head[i].bbox_coder.decode( rois[..., 1:], bbox_pred, max_shape=max_shape) rois = new_rois.reshape(-1, new_rois.shape[-1]) # add dummy batch index rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1) cls_score = sum(ms_scores) / float(len(ms_scores)) bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4) rois = rois.reshape(batch_size, num_proposals_per_img, -1) det_bboxes, det_labels = self.bbox_head[-1].onnx_export( rois, cls_score, bbox_pred, max_shape, cfg=rcnn_test_cfg) if not self.with_mask: return det_bboxes, det_labels else: batch_index = torch.arange( det_bboxes.size(0), device=det_bboxes.device).float().view(-1, 1, 1).expand( det_bboxes.size(0), det_bboxes.size(1), 1) rois = det_bboxes[..., :4] mask_rois = torch.cat([batch_index, rois], dim=-1) mask_rois = mask_rois.view(-1, 5) aug_masks = [] for i in range(self.num_stages): mask_results = self._mask_forward(i, x, mask_rois) mask_pred = mask_results['mask_pred'] aug_masks.append(mask_pred) max_shape = img_metas[0]['img_shape_for_onnx'] # calculate the mean of masks from several stage mask_pred = sum(aug_masks) / len(aug_masks) segm_results = self.mask_head[-1].onnx_export( mask_pred, rois.reshape(-1, 4), det_labels.reshape(-1), self.test_cfg, max_shape) segm_results = segm_results.reshape(batch_size, det_bboxes.shape[1], max_shape[0], max_shape[1]) return det_bboxes, det_labels, segm_results
27,668
42.780063
79
py
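During training, CascadeRoIHead prefixes each stage's losses with s{i}. and multiplies loss terms (but not plain metrics such as accuracy) by that stage's weight. A toy sketch of this bookkeeping, with made-up loss values standing in for the dict returned by each stage's bbox head:

import torch

stage_loss_weights = [1.0, 0.5, 0.25]
losses = {}

for i, lw in enumerate(stage_loss_weights):
    # stand-in for the per-stage loss dict
    loss_bbox = {'loss_cls': torch.tensor(0.8),
                 'loss_bbox': torch.tensor(0.4),
                 'acc': torch.tensor(92.0)}
    for name, value in loss_bbox.items():
        # only entries containing 'loss' are re-weighted; metrics pass through
        losses[f's{i}.{name}'] = value * lw if 'loss' in name else value

print(losses['s2.loss_cls'])  # tensor(0.2000)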
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/trident_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.ops import batched_nms from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, multiclass_nms) from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead from ..builder import HEADS @HEADS.register_module() class TridentRoIHead(StandardRoIHead): """Trident roi head. Args: num_branch (int): Number of branches in TridentNet. test_branch_idx (int): In inference, all 3 branches will be used if `test_branch_idx==-1`, otherwise only branch with index `test_branch_idx` will be used. """ def __init__(self, num_branch, test_branch_idx, **kwargs): self.num_branch = num_branch self.test_branch_idx = test_branch_idx super(TridentRoIHead, self).__init__(**kwargs) def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels): """Merge bbox predictions of each branch.""" if trident_det_bboxes.numel() == 0: det_bboxes = trident_det_bboxes.new_zeros((0, 5)) det_labels = trident_det_bboxes.new_zeros((0, ), dtype=torch.long) else: nms_bboxes = trident_det_bboxes[:, :4] nms_scores = trident_det_bboxes[:, 4].contiguous() nms_inds = trident_det_labels nms_cfg = self.test_cfg['nms'] det_bboxes, keep = batched_nms(nms_bboxes, nms_scores, nms_inds, nms_cfg) det_labels = trident_det_labels[keep] if self.test_cfg['max_per_img'] > 0: det_labels = det_labels[:self.test_cfg['max_per_img']] det_bboxes = det_bboxes[:self.test_cfg['max_per_img']] return det_bboxes, det_labels def simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Test without augmentation as follows: 1. Compute prediction bbox and label per branch. 2. Merge predictions of each branch according to scores of bboxes, i.e., bboxes with higher score are kept to give top-k prediction. """ assert self.with_bbox, 'Bbox head must be implemented.' 
det_bboxes_list, det_labels_list = self.simple_test_bboxes( x, img_metas, proposal_list, self.test_cfg, rescale=rescale) num_branch = self.num_branch if self.test_branch_idx == -1 else 1 for _ in range(len(det_bboxes_list)): if det_bboxes_list[_].shape[0] == 0: det_bboxes_list[_] = det_bboxes_list[_].new_empty((0, 5)) det_bboxes, det_labels = [], [] for i in range(len(img_metas) // num_branch): det_result = self.merge_trident_bboxes( torch.cat(det_bboxes_list[i * num_branch:(i + 1) * num_branch]), torch.cat(det_labels_list[i * num_branch:(i + 1) * num_branch])) det_bboxes.append(det_result[0]) det_labels.append(det_result[1]) bbox_results = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head.num_classes) for i in range(len(det_bboxes)) ] return bbox_results def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): """Test det bboxes with test time augmentation.""" aug_bboxes = [] aug_scores = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] trident_bboxes, trident_scores = [], [] for branch_idx in range(len(proposal_list)): proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip, flip_direction) rois = bbox2roi([proposals]) bbox_results = self._bbox_forward(x, rois) bboxes, scores = self.bbox_head.get_bboxes( rois, bbox_results['cls_score'], bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) trident_bboxes.append(bboxes) trident_scores.append(scores) aug_bboxes.append(torch.cat(trident_bboxes, 0)) aug_scores.append(torch.cat(trident_scores, 0)) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) return det_bboxes, det_labels
5,321
42.983471
78
py
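TridentRoIHead concatenates the detections from all branches and merges them with class-aware NMS, keeping at most max_per_img boxes. A hedged sketch of the same merge step using torchvision's batched_nms as a stand-in for the mmcv op used above (toy boxes, made-up thresholds):

import torch
from torchvision.ops import batched_nms

# detections pooled from all branches: (N, 5) as (x1, y1, x2, y2, score)
det_bboxes = torch.tensor([[10., 10., 50., 50., 0.9],
                           [12., 11., 52., 49., 0.8],   # overlaps the first
                           [80., 80., 120., 120., 0.7]])
det_labels = torch.tensor([0, 0, 1])

keep = batched_nms(det_bboxes[:, :4], det_bboxes[:, 4], det_labels,
                   iou_threshold=0.5)
max_per_img = 100
keep = keep[:max_per_img]
print(det_bboxes[keep])  # the duplicate class-0 box is suppressed
print(det_labels[keep])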
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/dynamic_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmdet.core import bbox2roi from mmdet.models.losses import SmoothL1Loss from ..builder import HEADS from .standard_roi_head import StandardRoIHead EPS = 1e-15 @HEADS.register_module() class DynamicRoIHead(StandardRoIHead): """RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_.""" def __init__(self, **kwargs): super(DynamicRoIHead, self).__init__(**kwargs) assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss) # the IoU history of the past `update_iter_interval` iterations self.iou_history = [] # the beta history of the past `update_iter_interval` iterations self.beta_history = [] def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): """Forward function for training. Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposals (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components """ # assign gts and sample proposals if self.with_bbox or self.with_mask: num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] cur_iou = [] for i in range(num_imgs): assign_result = self.bbox_assigner.assign( proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = self.bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) # record the `iou_topk`-th largest IoU in an image iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk, len(assign_result.max_overlaps)) ious, _ = torch.topk(assign_result.max_overlaps, iou_topk) cur_iou.append(ious[-1].item()) sampling_results.append(sampling_result) # average the current IoUs over images cur_iou = np.mean(cur_iou) self.iou_history.append(cur_iou) losses = dict() # bbox head forward and loss if self.with_bbox: bbox_results = self._bbox_forward_train(x, sampling_results, gt_bboxes, gt_labels, img_metas) losses.update(bbox_results['loss_bbox']) # mask head forward and loss if self.with_mask: mask_results = self._mask_forward_train(x, sampling_results, bbox_results['bbox_feats'], gt_masks, img_metas) losses.update(mask_results['loss_mask']) # update IoU threshold and SmoothL1 beta update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval if len(self.iou_history) % update_iter_interval == 0: new_iou_thr, new_beta = self.update_hyperparameters() return losses def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas): num_imgs = len(img_metas) rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward(x, rois) bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, gt_labels, self.train_cfg) # record the `beta_topk`-th smallest target # 
`bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets # and bbox_weights, respectively pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1) num_pos = len(pos_inds) cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1) beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs, num_pos) cur_target = torch.kthvalue(cur_target, beta_topk)[0].item() self.beta_history.append(cur_target) loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update(loss_bbox=loss_bbox) return bbox_results def update_hyperparameters(self): """Update hyperparameters like IoU thresholds for assigner and beta for SmoothL1 loss based on the training statistics. Returns: tuple[float]: the updated ``iou_thr`` and ``beta``. """ new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou, np.mean(self.iou_history)) self.iou_history = [] self.bbox_assigner.pos_iou_thr = new_iou_thr self.bbox_assigner.neg_iou_thr = new_iou_thr self.bbox_assigner.min_pos_iou = new_iou_thr if (np.median(self.beta_history) < EPS): # avoid 0 or too small value for new_beta new_beta = self.bbox_head.loss_bbox.beta else: new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta, np.median(self.beta_history)) self.beta_history = [] self.bbox_head.loss_bbox.beta = new_beta return new_iou_thr, new_beta
6,654
41.660256
79
py
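DynamicRoIHead's update rule is simple: the new assigner IoU threshold is the mean of the recent IoU history (never below the initial value), and the new SmoothL1 beta is the median of the recent regression-error history (never above the initial beta, and guarded against near-zero values). A small numpy sketch of update_hyperparameters, assuming illustrative initial values:

import numpy as np

EPS = 1e-15
initial_iou, initial_beta = 0.4, 1.0
current_beta = 1.0

iou_history = [0.45, 0.52, 0.48]
beta_history = [0.08, 0.12, 0.10]

new_iou_thr = max(initial_iou, np.mean(iou_history))
if np.median(beta_history) < EPS:
    new_beta = current_beta          # avoid collapsing beta to ~0
else:
    new_beta = min(initial_beta, np.median(beta_history))

print(new_iou_thr, new_beta)  # 0.483..., 0.1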
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/point_rend_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. # Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa import os import warnings import numpy as np import torch import torch.nn.functional as F from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks from .. import builder from ..builder import HEADS from .standard_roi_head import StandardRoIHead @HEADS.register_module() class PointRendRoIHead(StandardRoIHead): """`PointRend <https://arxiv.org/abs/1912.08193>`_.""" def __init__(self, point_head, *args, **kwargs): super().__init__(*args, **kwargs) assert self.with_bbox and self.with_mask self.init_point_head(point_head) def init_point_head(self, point_head): """Initialize ``point_head``""" self.point_head = builder.build_head(point_head) def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, img_metas): """Run forward function and calculate loss for mask head and point head in training.""" mask_results = super()._mask_forward_train(x, sampling_results, bbox_feats, gt_masks, img_metas) if mask_results['loss_mask'] is not None: loss_point = self._mask_point_forward_train( x, sampling_results, mask_results['mask_pred'], gt_masks, img_metas) mask_results['loss_mask'].update(loss_point) return mask_results def _mask_point_forward_train(self, x, sampling_results, mask_pred, gt_masks, img_metas): """Run forward function and calculate loss for point head in training.""" pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) rel_roi_points = self.point_head.get_roi_rel_points_train( mask_pred, pos_labels, cfg=self.train_cfg) rois = bbox2roi([res.pos_bboxes for res in sampling_results]) fine_grained_point_feats = self._get_fine_grained_point_feats( x, rois, rel_roi_points, img_metas) coarse_point_feats = point_sample(mask_pred, rel_roi_points) mask_point_pred = self.point_head(fine_grained_point_feats, coarse_point_feats) mask_point_target = self.point_head.get_targets( rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg) loss_mask_point = self.point_head.loss(mask_point_pred, mask_point_target, pos_labels) return loss_mask_point def _get_fine_grained_point_feats(self, x, rois, rel_roi_points, img_metas): """Sample fine grained feats from each level feature map and concatenate them together. Args: x (tuple[Tensor]): Feature maps of all scale level. rois (Tensor): shape (num_rois, 5). rel_roi_points (Tensor): A tensor of shape (num_rois, num_points, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the [mask_height, mask_width] grid. img_metas (list[dict]): Image meta info. Returns: Tensor: The fine grained features for each points, has shape (num_rois, feats_channels, num_points). """ num_imgs = len(img_metas) fine_grained_feats = [] for idx in range(self.mask_roi_extractor.num_inputs): feats = x[idx] spatial_scale = 1. 
/ float( self.mask_roi_extractor.featmap_strides[idx]) point_feats = [] for batch_ind in range(num_imgs): # unravel batch dim feat = feats[batch_ind].unsqueeze(0) inds = (rois[:, 0].long() == batch_ind) if inds.any(): rel_img_points = rel_roi_point_to_rel_img_point( rois[inds], rel_roi_points[inds], feat.shape[2:], spatial_scale).unsqueeze(0) point_feat = point_sample(feat, rel_img_points) point_feat = point_feat.squeeze(0).transpose(0, 1) point_feats.append(point_feat) fine_grained_feats.append(torch.cat(point_feats, dim=0)) return torch.cat(fine_grained_feats, dim=1) def _mask_point_forward_test(self, x, rois, label_pred, mask_pred, img_metas): """Mask refining process with point head in testing. Args: x (tuple[Tensor]): Feature maps of all scale level. rois (Tensor): shape (num_rois, 5). label_pred (Tensor): The predication class for each rois. mask_pred (Tensor): The predication coarse masks of shape (num_rois, num_classes, small_size, small_size). img_metas (list[dict]): Image meta info. Returns: Tensor: The refined masks of shape (num_rois, num_classes, large_size, large_size). """ refined_mask_pred = mask_pred.clone() for subdivision_step in range(self.test_cfg.subdivision_steps): refined_mask_pred = F.interpolate( refined_mask_pred, scale_factor=self.test_cfg.scale_factor, mode='bilinear', align_corners=False) # If `subdivision_num_points` is larger or equal to the # resolution of the next step, then we can skip this step num_rois, channels, mask_height, mask_width = \ refined_mask_pred.shape if (self.test_cfg.subdivision_num_points >= self.test_cfg.scale_factor**2 * mask_height * mask_width and subdivision_step < self.test_cfg.subdivision_steps - 1): continue point_indices, rel_roi_points = \ self.point_head.get_roi_rel_points_test( refined_mask_pred, label_pred, cfg=self.test_cfg) fine_grained_point_feats = self._get_fine_grained_point_feats( x, rois, rel_roi_points, img_metas) coarse_point_feats = point_sample(mask_pred, rel_roi_points) mask_point_pred = self.point_head(fine_grained_point_feats, coarse_point_feats) point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) refined_mask_pred = refined_mask_pred.reshape( num_rois, channels, mask_height * mask_width) refined_mask_pred = refined_mask_pred.scatter_( 2, point_indices, mask_point_pred) refined_mask_pred = refined_mask_pred.view(num_rois, channels, mask_height, mask_width) return refined_mask_pred def simple_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False): """Obtain mask prediction without augmentation.""" ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) if isinstance(scale_factors[0], float): warnings.warn( 'Scale factor in img_metas should be a ' 'ndarray with shape (4,) ' 'arrange as (factor_w, factor_h, factor_w, factor_h), ' 'The scale_factor with float type has been deprecated. ') scale_factors = np.array([scale_factors] * 4, dtype=np.float32) num_imgs = len(det_bboxes) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): segm_results = [[[] for _ in range(self.mask_head.num_classes)] for _ in range(num_imgs)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. 
_bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))] if rescale: scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ _bboxes[i] * scale_factors[i] for i in range(len(_bboxes)) ] mask_rois = bbox2roi(_bboxes) mask_results = self._mask_forward(x, mask_rois) # split batch mask prediction back to each image mask_pred = mask_results['mask_pred'] num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] mask_preds = mask_pred.split(num_mask_roi_per_img, 0) mask_rois = mask_rois.split(num_mask_roi_per_img, 0) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) else: x_i = [xx[[i]] for xx in x] mask_rois_i = mask_rois[i] mask_rois_i[:, 0] = 0 # TODO: remove this hack mask_pred_i = self._mask_point_forward_test( x_i, mask_rois_i, det_labels[i], mask_preds[i], [img_metas]) segm_result = self.mask_head.get_seg_masks( mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg, ori_shapes[i], scale_factors[i], rescale) segm_results.append(segm_result) return segm_results def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): """Test for mask head with test time augmentation.""" if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes)] else: aug_masks = [] for x, img_meta in zip(feats, img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip) mask_rois = bbox2roi([_bboxes]) mask_results = self._mask_forward(x, mask_rois) mask_results['mask_pred'] = self._mask_point_forward_test( x, mask_rois, det_labels, mask_results['mask_pred'], img_meta) # convert to numpy array to save memory aug_masks.append( mask_results['mask_pred'].sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] segm_result = self.mask_head.get_seg_masks( merged_masks, det_bboxes, det_labels, self.test_cfg, ori_shape, scale_factor=1.0, rescale=False) return segm_result def _onnx_get_fine_grained_point_feats(self, x, rois, rel_roi_points): """Export the process of sampling fine grained feats to onnx. Args: x (tuple[Tensor]): Feature maps of all scale level. rois (Tensor): shape (num_rois, 5). rel_roi_points (Tensor): A tensor of shape (num_rois, num_points, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the [mask_height, mask_width] grid. Returns: Tensor: The fine grained features for each points, has shape (num_rois, feats_channels, num_points). """ batch_size = x[0].shape[0] num_rois = rois.shape[0] fine_grained_feats = [] for idx in range(self.mask_roi_extractor.num_inputs): feats = x[idx] spatial_scale = 1. 
/ float( self.mask_roi_extractor.featmap_strides[idx]) rel_img_points = rel_roi_point_to_rel_img_point( rois, rel_roi_points, feats, spatial_scale) channels = feats.shape[1] num_points = rel_img_points.shape[1] rel_img_points = rel_img_points.reshape(batch_size, -1, num_points, 2) point_feats = point_sample(feats, rel_img_points) point_feats = point_feats.transpose(1, 2).reshape( num_rois, channels, num_points) fine_grained_feats.append(point_feats) return torch.cat(fine_grained_feats, dim=1) def _mask_point_onnx_export(self, x, rois, label_pred, mask_pred): """Export mask refining process with point head to onnx. Args: x (tuple[Tensor]): Feature maps of all scale level. rois (Tensor): shape (num_rois, 5). label_pred (Tensor): The predication class for each rois. mask_pred (Tensor): The predication coarse masks of shape (num_rois, num_classes, small_size, small_size). Returns: Tensor: The refined masks of shape (num_rois, num_classes, large_size, large_size). """ refined_mask_pred = mask_pred.clone() for subdivision_step in range(self.test_cfg.subdivision_steps): refined_mask_pred = F.interpolate( refined_mask_pred, scale_factor=self.test_cfg.scale_factor, mode='bilinear', align_corners=False) # If `subdivision_num_points` is larger or equal to the # resolution of the next step, then we can skip this step num_rois, channels, mask_height, mask_width = \ refined_mask_pred.shape if (self.test_cfg.subdivision_num_points >= self.test_cfg.scale_factor**2 * mask_height * mask_width and subdivision_step < self.test_cfg.subdivision_steps - 1): continue point_indices, rel_roi_points = \ self.point_head.get_roi_rel_points_test( refined_mask_pred, label_pred, cfg=self.test_cfg) fine_grained_point_feats = self._onnx_get_fine_grained_point_feats( x, rois, rel_roi_points) coarse_point_feats = point_sample(mask_pred, rel_roi_points) mask_point_pred = self.point_head(fine_grained_point_feats, coarse_point_feats) point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) refined_mask_pred = refined_mask_pred.reshape( num_rois, channels, mask_height * mask_width) is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT' # avoid ScatterElements op in ONNX for TensorRT if is_trt_backend: mask_shape = refined_mask_pred.shape point_shape = point_indices.shape inds_dim0 = torch.arange(point_shape[0]).reshape( point_shape[0], 1, 1).expand_as(point_indices) inds_dim1 = torch.arange(point_shape[1]).reshape( 1, point_shape[1], 1).expand_as(point_indices) inds_1d = inds_dim0.reshape( -1) * mask_shape[1] * mask_shape[2] + inds_dim1.reshape( -1) * mask_shape[2] + point_indices.reshape(-1) refined_mask_pred = refined_mask_pred.reshape(-1) refined_mask_pred[inds_1d] = mask_point_pred.reshape(-1) refined_mask_pred = refined_mask_pred.reshape(*mask_shape) else: refined_mask_pred = refined_mask_pred.scatter_( 2, point_indices, mask_point_pred) refined_mask_pred = refined_mask_pred.view(num_rois, channels, mask_height, mask_width) return refined_mask_pred def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs): """Export mask branch to onnx which supports batch inference. Args: x (tuple[Tensor]): Feature maps of all scale level. img_metas (list[dict]): Image meta info. det_bboxes (Tensor): Bboxes and corresponding scores. has shape [N, num_bboxes, 5]. det_labels (Tensor): class labels of shape [N, num_bboxes]. Returns: Tensor: The segmentation results of shape [N, num_bboxes, image_height, image_width]. 
""" if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): raise RuntimeError('[ONNX Error] Can not record MaskHead ' 'as it has not been executed this time') batch_size = det_bboxes.size(0) # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. det_bboxes = det_bboxes[..., :4] batch_index = torch.arange( det_bboxes.size(0), device=det_bboxes.device).float().view( -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1) mask_rois = torch.cat([batch_index, det_bboxes], dim=-1) mask_rois = mask_rois.view(-1, 5) mask_results = self._mask_forward(x, mask_rois) mask_pred = mask_results['mask_pred'] max_shape = img_metas[0]['img_shape_for_onnx'] num_det = det_bboxes.shape[1] det_bboxes = det_bboxes.reshape(-1, 4) det_labels = det_labels.reshape(-1) mask_pred = self._mask_point_onnx_export(x, mask_rois, det_labels, mask_pred) segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes, det_labels, self.test_cfg, max_shape) segm_results = segm_results.reshape(batch_size, num_det, max_shape[0], max_shape[1]) return segm_results
18,743
46.573604
101
py
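The `_mask_point_onnx_export` routine above swaps `scatter_` for flattened 1-D indexing when the TensorRT backend is targeted. A minimal standalone sketch of that equivalence, using hypothetical tensor sizes rather than the head's real shapes:

import torch

# Hypothetical sizes: 2 RoIs, 3 channels, 4x4 mask grid, 5 refined points per RoI.
num_rois, channels, h, w = 2, 3, 4, 4
mask = torch.zeros(num_rois, channels, h * w)
# unique point indices per RoI so both write orders give the same result
point_indices = torch.stack([torch.randperm(h * w)[:5] for _ in range(num_rois)])
point_indices = point_indices.view(num_rois, 1, 5).expand(-1, channels, -1)
point_pred = torch.rand(num_rois, channels, 5)

# Variant 1: scatter_ along the point dimension (exported as ScatterElements).
out_scatter = mask.clone().scatter_(2, point_indices, point_pred)

# Variant 2: flatten to 1-D indices, mirroring the TensorRT-friendly branch above.
inds_dim0 = torch.arange(num_rois).reshape(num_rois, 1, 1).expand_as(point_indices)
inds_dim1 = torch.arange(channels).reshape(1, channels, 1).expand_as(point_indices)
inds_1d = (inds_dim0 * channels + inds_dim1).reshape(-1) * (h * w) \
    + point_indices.reshape(-1)
out_flat = mask.clone().reshape(-1)
out_flat[inds_1d] = point_pred.reshape(-1)
out_flat = out_flat.reshape(num_rois, channels, h * w)

assert torch.equal(out_scatter, out_flat)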
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/base_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from mmcv.runner import BaseModule from ..builder import build_shared_head class BaseRoIHead(BaseModule, metaclass=ABCMeta): """Base class for RoIHeads.""" def __init__(self, bbox_roi_extractor=None, bbox_head=None, mask_roi_extractor=None, mask_head=None, shared_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(BaseRoIHead, self).__init__(init_cfg) self.train_cfg = train_cfg self.test_cfg = test_cfg if shared_head is not None: shared_head.pretrained = pretrained self.shared_head = build_shared_head(shared_head) if bbox_head is not None: self.init_bbox_head(bbox_roi_extractor, bbox_head) if mask_head is not None: self.init_mask_head(mask_roi_extractor, mask_head) self.init_assigner_sampler() @property def with_bbox(self): """bool: whether the RoI head contains a `bbox_head`""" return hasattr(self, 'bbox_head') and self.bbox_head is not None @property def with_mask(self): """bool: whether the RoI head contains a `mask_head`""" return hasattr(self, 'mask_head') and self.mask_head is not None @property def with_shared_head(self): """bool: whether the RoI head contains a `shared_head`""" return hasattr(self, 'shared_head') and self.shared_head is not None @abstractmethod def init_bbox_head(self): """Initialize ``bbox_head``""" pass @abstractmethod def init_mask_head(self): """Initialize ``mask_head``""" pass @abstractmethod def init_assigner_sampler(self): """Initialize assigner and sampler.""" pass @abstractmethod def forward_train(self, x, img_meta, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, **kwargs): """Forward function during training.""" async def async_simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False, **kwargs): """Asynchronized test function.""" raise NotImplementedError def simple_test(self, x, proposal_list, img_meta, proposals=None, rescale=False, **kwargs): """Test without augmentation.""" def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """
3,197
29.75
78
py
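`BaseRoIHead` above only becomes instantiable once a subclass fills in the abstract hooks. A minimal illustrative subclass with placeholder bodies (not a real mmdet head; it only shows which methods must exist and what the `with_bbox` / `with_mask` properties probe):

from mmdet.models.roi_heads import BaseRoIHead


class ToyRoIHead(BaseRoIHead):
    """Illustrative only: placeholder implementations of the abstract hooks."""

    def init_bbox_head(self, bbox_roi_extractor, bbox_head):
        # A real head would build modules from these configs.
        self.bbox_roi_extractor = bbox_roi_extractor
        self.bbox_head = bbox_head

    def init_mask_head(self, mask_roi_extractor, mask_head):
        self.mask_roi_extractor = mask_roi_extractor
        self.mask_head = mask_head

    def init_assigner_sampler(self):
        self.bbox_assigner, self.bbox_sampler = None, None

    def forward_train(self, x, img_meta, proposal_list, gt_bboxes, gt_labels,
                      gt_bboxes_ignore=None, gt_masks=None, **kwargs):
        return dict()  # a real head returns a dict of loss terms


head = ToyRoIHead()
# No bbox/mask configs were passed, so the corresponding branches are absent.
assert not head.with_bbox and not head.with_mask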
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/mask_scoring_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core import bbox2roi from ..builder import HEADS, build_head from .standard_roi_head import StandardRoIHead @HEADS.register_module() class MaskScoringRoIHead(StandardRoIHead): """Mask Scoring RoIHead for Mask Scoring RCNN. https://arxiv.org/abs/1903.00241 """ def __init__(self, mask_iou_head, **kwargs): assert mask_iou_head is not None super(MaskScoringRoIHead, self).__init__(**kwargs) self.mask_iou_head = build_head(mask_iou_head) def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, img_metas): """Run forward function and calculate loss for Mask head in training.""" pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) mask_results = super(MaskScoringRoIHead, self)._mask_forward_train(x, sampling_results, bbox_feats, gt_masks, img_metas) if mask_results['loss_mask'] is None: return mask_results # mask iou head forward and loss pos_mask_pred = mask_results['mask_pred'][ range(mask_results['mask_pred'].size(0)), pos_labels] mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'], pos_mask_pred) pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels] mask_iou_targets = self.mask_iou_head.get_targets( sampling_results, gt_masks, pos_mask_pred, mask_results['mask_targets'], self.train_cfg) loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets) mask_results['loss_mask'].update(loss_mask_iou) return mask_results def simple_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False): """Obtain mask prediction without augmentation.""" # image shapes of images in the batch ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) num_imgs = len(det_bboxes) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): num_classes = self.mask_head.num_classes segm_results = [[[] for _ in range(num_classes)] for _ in range(num_imgs)] mask_scores = [[[] for _ in range(num_classes)] for _ in range(num_imgs)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. 
if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i] for i in range(num_imgs) ] mask_rois = bbox2roi(_bboxes) mask_results = self._mask_forward(x, mask_rois) concat_det_labels = torch.cat(det_labels) # get mask scores with mask iou head mask_feats = mask_results['mask_feats'] mask_pred = mask_results['mask_pred'] mask_iou_pred = self.mask_iou_head( mask_feats, mask_pred[range(concat_det_labels.size(0)), concat_det_labels]) # split batch mask prediction back to each image num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes) mask_preds = mask_pred.split(num_bboxes_per_img, 0) mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0) # apply mask post-processing to each image individually segm_results = [] mask_scores = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) mask_scores.append( [[] for _ in range(self.mask_head.num_classes)]) else: segm_result = self.mask_head.get_seg_masks( mask_preds[i], _bboxes[i], det_labels[i], self.test_cfg, ori_shapes[i], scale_factors[i], rescale) # get mask scores with mask iou head mask_score = self.mask_iou_head.get_mask_scores( mask_iou_preds[i], det_bboxes[i], det_labels[i]) segm_results.append(segm_result) mask_scores.append(mask_score) return list(zip(segm_results, mask_scores))
5,230
44.885965
79
py
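The Mask Scoring head above re-scores each mask with the IoU predicted by `mask_iou_head`; per the Mask Scoring R-CNN paper, the reported mask score is the product of the box classification score and that predicted IoU. A toy sketch of the combination with made-up numbers (not the actual `get_mask_scores` implementation):

import torch

# Hypothetical detections: per-box classification scores and predicted mask IoUs
# for the predicted class of each box.
cls_scores = torch.tensor([0.95, 0.80, 0.60])   # from the bbox branch
mask_ious = torch.tensor([0.90, 0.50, 0.85])    # from the MaskIoU head

# A confident box with a poor mask (second entry) is ranked below cleaner masks.
mask_scores = cls_scores * mask_ious
print(mask_scores)  # approximately tensor([0.8550, 0.4000, 0.5100])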
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .base_roi_head import BaseRoIHead from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead, DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead, Shared2FCBBoxHead, Shared4Conv1FCBBoxHead) from .cascade_roi_head import CascadeRoIHead from .double_roi_head import DoubleHeadRoIHead from .dynamic_roi_head import DynamicRoIHead from .grid_roi_head import GridRoIHead from .htc_roi_head import HybridTaskCascadeRoIHead from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead, FusedSemanticHead, GlobalContextHead, GridHead, HTCMaskHead, MaskIoUHead, MaskPointHead, SCNetMaskHead, SCNetSemanticHead) from .mask_scoring_roi_head import MaskScoringRoIHead from .pisa_roi_head import PISARoIHead from .point_rend_roi_head import PointRendRoIHead from .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor, SingleRoIExtractor) from .scnet_roi_head import SCNetRoIHead from .shared_heads import ResLayer from .sparse_roi_head import SparseRoIHead from .standard_roi_head import StandardRoIHead from .trident_roi_head import TridentRoIHead __all__ = [ 'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead', 'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead', 'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead', 'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', 'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor', 'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead', 'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead', 'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead', 'FeatureRelayHead', 'GlobalContextHead' ]
1,961
50.631579
79
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/htc_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn.functional as F from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms) from ..builder import HEADS, build_head, build_roi_extractor from ..utils.brick_wrappers import adaptive_avg_pool2d from .cascade_roi_head import CascadeRoIHead @HEADS.register_module() class HybridTaskCascadeRoIHead(CascadeRoIHead): """Hybrid task cascade roi head including one bbox head and one mask head. https://arxiv.org/abs/1901.07518 """ def __init__(self, num_stages, stage_loss_weights, semantic_roi_extractor=None, semantic_head=None, semantic_fusion=('bbox', 'mask'), interleaved=True, mask_info_flow=True, **kwargs): super(HybridTaskCascadeRoIHead, self).__init__(num_stages, stage_loss_weights, **kwargs) assert self.with_bbox assert not self.with_shared_head # shared head is not supported if semantic_head is not None: self.semantic_roi_extractor = build_roi_extractor( semantic_roi_extractor) self.semantic_head = build_head(semantic_head) self.semantic_fusion = semantic_fusion self.interleaved = interleaved self.mask_info_flow = mask_info_flow @property def with_semantic(self): """bool: whether the head has semantic head""" if hasattr(self, 'semantic_head') and self.semantic_head is not None: return True else: return False def forward_dummy(self, x, proposals): """Dummy forward function.""" outs = () # semantic head if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None # bbox heads rois = bbox2roi([proposals]) for i in range(self.num_stages): bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic_feat) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) # mask heads if self.with_mask: mask_rois = rois[:100] mask_roi_extractor = self.mask_roi_extractor[-1] mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor( [semantic_feat], mask_rois) mask_feats += mask_semantic_feat last_feat = None for i in range(self.num_stages): mask_head = self.mask_head[i] if self.mask_info_flow: mask_pred, last_feat = mask_head(mask_feats, last_feat) else: mask_pred = mask_head(mask_feats) outs = outs + (mask_pred, ) return outs def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, semantic_feat=None): """Run forward function and calculate loss for box head in training.""" bbox_head = self.bbox_head[stage] rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward( stage, x, rois, semantic_feat=semantic_feat) bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) loss_bbox = bbox_head.loss(bbox_results['cls_score'], bbox_results['bbox_pred'], rois, *bbox_targets) bbox_results.update( loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets, ) return bbox_results def _mask_forward_train(self, stage, x, sampling_results, gt_masks, rcnn_train_cfg, semantic_feat=None): """Run forward function and calculate loss for mask head in training.""" mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], pos_rois) # semantic feature fusion # element-wise sum for original features and pooled semantic features if 
self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], pos_rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat # mask information flow # forward all previous mask heads to obtain last_feat, and fuse it # with the normal mask feature if self.mask_info_flow: last_feat = None for i in range(stage): last_feat = self.mask_head[i]( mask_feats, last_feat, return_logits=False) mask_pred = mask_head(mask_feats, last_feat, return_feat=False) else: mask_pred = mask_head(mask_feats, return_feat=False) mask_targets = mask_head.get_targets(sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) mask_results = dict(loss_mask=loss_mask) return mask_results def _bbox_forward(self, stage, x, rois, semantic_feat=None): """Box head forward function used in both training and testing.""" bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor( x[:len(bbox_roi_extractor.featmap_strides)], rois) if self.with_semantic and 'bbox' in self.semantic_fusion: bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: bbox_semantic_feat = adaptive_avg_pool2d( bbox_semantic_feat, bbox_feats.shape[-2:]) bbox_feats += bbox_semantic_feat cls_score, bbox_pred = bbox_head(bbox_feats) bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred) return bbox_results def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None): """Mask head forward function for testing.""" mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] mask_rois = bbox2roi([bboxes]) mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], mask_rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat if self.mask_info_flow: last_feat = None last_pred = None for i in range(stage): mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat) if last_pred is not None: mask_pred = mask_pred + last_pred last_pred = mask_pred mask_pred = mask_head(mask_feats, last_feat, return_feat=False) if last_pred is not None: mask_pred = mask_pred + last_pred else: mask_pred = mask_head(mask_feats) return mask_pred def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, gt_semantic_seg=None): """ Args: x (list[Tensor]): list of multi-level img features. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposal_list (list[Tensors]): list of region proposals. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None, list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None, Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. gt_semantic_seg (None, list[Tensor]): semantic segmentation masks used if the architecture supports semantic segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components """ # semantic segmentation part # 2 outputs: segmentation prediction and embedded features losses = dict() if self.with_semantic: semantic_pred, semantic_feat = self.semantic_head(x) loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) losses['loss_semantic_seg'] = loss_seg else: semantic_feat = None for i in range(self.num_stages): self.current_stage = i rcnn_train_cfg = self.train_cfg[i] lw = self.stage_loss_weights[i] # assign gts and sample proposals sampling_results = [] bbox_assigner = self.bbox_assigner[i] bbox_sampler = self.bbox_sampler[i] num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] for j in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss bbox_results = \ self._bbox_forward_train( i, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, semantic_feat) roi_labels = bbox_results['bbox_targets'][0] for name, value in bbox_results['loss_bbox'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # mask head forward and loss if self.with_mask: # interleaved execution: use regressed bboxes by the box branch # to train the mask branch if self.interleaved: pos_is_gts = [res.pos_is_gt for res in sampling_results] with torch.no_grad(): proposal_list = self.bbox_head[i].refine_bboxes( bbox_results['rois'], roi_labels, bbox_results['bbox_pred'], pos_is_gts, img_metas) # re-assign and sample 512 RoIs from 512 RoIs sampling_results = [] for j in range(num_imgs): assign_result = bbox_assigner.assign( proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) mask_results = self._mask_forward_train( i, x, sampling_results, gt_masks, rcnn_train_cfg, semantic_feat) for name, value in mask_results['loss_mask'].items(): losses[f's{i}.{name}'] = ( value * lw if 'loss' in name else value) # refine bboxes (same as Cascade R-CNN) if i < self.num_stages - 1 and not self.interleaved: pos_is_gts = [res.pos_is_gt for res in sampling_results] with torch.no_grad(): proposal_list = self.bbox_head[i].refine_bboxes( bbox_results['rois'], roi_labels, bbox_results['bbox_pred'], pos_is_gts, img_metas) return losses def simple_test(self, x, proposal_list, img_metas, rescale=False): """Test without augmentation. Args: x (tuple[Tensor]): Features from upstream network. Each has shape (batch_size, c, h, w). proposal_list (list(Tensor)): Proposals from rpn head. Each has shape (num_proposals, 5), last dimension 5 represent (x1, y1, x2, y2, score). img_metas (list[dict]): Meta information of images. rescale (bool): Whether to rescale the results to the original image. 
Default: True. Returns: list[list[np.ndarray]] or list[tuple]: When no mask branch, it is bbox results of each image and classes with type `list[list[np.ndarray]]`. The outer list corresponds to each image. The inner list corresponds to each class. When the model has mask branch, it contains bbox results and mask results. The outer list corresponds to each image, and first element of tuple is bbox results, second element is mask results. """ if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None num_imgs = len(proposal_list) img_shapes = tuple(meta['img_shape'] for meta in img_metas) ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # "ms" in variable names means multi-stage ms_bbox_result = {} ms_segm_result = {} ms_scores = [] rcnn_test_cfg = self.test_cfg rois = bbox2roi(proposal_list) if rois.shape[0] == 0: # There is no proposal in the whole batch bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head[-1].num_classes) ]] * num_imgs if self.with_mask: mask_classes = self.mask_head[-1].num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] results = list(zip(bbox_results, segm_results)) else: results = bbox_results return results for i in range(self.num_stages): bbox_head = self.bbox_head[i] bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic_feat) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple(len(p) for p in proposal_list) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) bbox_pred = bbox_pred.split(num_proposals_per_img, 0) ms_scores.append(cls_score) if i < self.num_stages - 1: refine_rois_list = [] for j in range(num_imgs): if rois[j].shape[0] > 0: bbox_label = cls_score[j][:, :-1].argmax(dim=1) refine_rois = bbox_head.regress_by_class( rois[j], bbox_label, bbox_pred[j], img_metas[j]) refine_rois_list.append(refine_rois) rois = torch.cat(refine_rois_list) # average scores of each image by stages cls_score = [ sum([score[i] for score in ms_scores]) / float(len(ms_scores)) for i in range(num_imgs) ] # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(num_imgs): det_bbox, det_label = self.bbox_head[-1].get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) bbox_result = [ bbox2result(det_bboxes[i], det_labels[i], self.bbox_head[-1].num_classes) for i in range(num_imgs) ] ms_bbox_result['ensemble'] = bbox_result if self.with_mask: if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): mask_classes = self.mask_head[-1].num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] else: if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i] for i in range(num_imgs) ] mask_rois = bbox2roi(_bboxes) aug_masks = [] mask_roi_extractor = self.mask_roi_extractor[-1] mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor( 
[semantic_feat], mask_rois) mask_feats += mask_semantic_feat last_feat = None num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) for i in range(self.num_stages): mask_head = self.mask_head[i] if self.mask_info_flow: mask_pred, last_feat = mask_head(mask_feats, last_feat) else: mask_pred = mask_head(mask_feats) # split batch mask prediction back to each image mask_pred = mask_pred.split(num_bbox_per_img, 0) aug_masks.append( [mask.sigmoid().cpu().numpy() for mask in mask_pred]) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head[-1].num_classes)]) else: aug_mask = [mask[i] for mask in aug_masks] merged_mask = merge_aug_masks( aug_mask, [[img_metas[i]]] * self.num_stages, rcnn_test_cfg) segm_result = self.mask_head[-1].get_seg_masks( merged_mask, _bboxes[i], det_labels[i], rcnn_test_cfg, ori_shapes[i], scale_factors[i], rescale) segm_results.append(segm_result) ms_segm_result['ensemble'] = segm_results if self.with_mask: results = list( zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble'])) else: results = ms_bbox_result['ensemble'] return results def aug_test(self, img_feats, proposal_list, img_metas, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ if self.with_semantic: semantic_feats = [ self.semantic_head(feat)[1] for feat in img_feats ] else: semantic_feats = [None] * len(img_metas) rcnn_test_cfg = self.test_cfg aug_bboxes = [] aug_scores = [] for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip, flip_direction) # "ms" in variable names means multi-stage ms_scores = [] rois = bbox2roi([proposals]) if rois.shape[0] == 0: # There is no proposal in the single image aug_bboxes.append(rois.new_zeros(0, 4)) aug_scores.append(rois.new_zeros(0, 1)) continue for i in range(self.num_stages): bbox_head = self.bbox_head[i] bbox_results = self._bbox_forward( i, x, rois, semantic_feat=semantic) ms_scores.append(bbox_results['cls_score']) if i < self.num_stages - 1: bbox_label = bbox_results['cls_score'].argmax(dim=1) rois = bbox_head.regress_by_class( rois, bbox_label, bbox_results['bbox_pred'], img_meta[0]) cls_score = sum(ms_scores) / float(len(ms_scores)) bboxes, scores = self.bbox_head[-1].get_bboxes( rois, cls_score, bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) if self.with_mask: if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head[-1].num_classes)] else: aug_masks = [] aug_img_metas = [] for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = 
img_meta[0]['flip_direction'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip, flip_direction) mask_rois = bbox2roi([_bboxes]) mask_feats = self.mask_roi_extractor[-1]( x[:len(self.mask_roi_extractor[-1].featmap_strides)], mask_rois) if self.with_semantic: semantic_feat = semantic mask_semantic_feat = self.semantic_roi_extractor( [semantic_feat], mask_rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[ -2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat last_feat = None for i in range(self.num_stages): mask_head = self.mask_head[i] if self.mask_info_flow: mask_pred, last_feat = mask_head( mask_feats, last_feat) else: mask_pred = mask_head(mask_feats) aug_masks.append(mask_pred.sigmoid().cpu().numpy()) aug_img_metas.append(img_meta) merged_masks = merge_aug_masks(aug_masks, aug_img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] segm_result = self.mask_head[-1].get_seg_masks( merged_masks, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor=1.0, rescale=False) return [(bbox_result, segm_result)] else: return [bbox_result]
27,630
42.928458
79
py
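The semantic fusion used in both the bbox and mask branches above is an element-wise sum, preceded by an adaptive average pool whenever the pooled semantic feature does not match the RoI feature size. A standalone sketch of that step with hypothetical shapes:

import torch
import torch.nn.functional as F

# Hypothetical shapes: 4 RoIs, 256 channels; mask features are 14x14 while the
# RoI-pooled semantic features came out at 28x28.
mask_feats = torch.rand(4, 256, 14, 14)
mask_semantic_feat = torch.rand(4, 256, 28, 28)

# Same fusion rule as in the mask branch above: resize if needed, then add.
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
    mask_semantic_feat = F.adaptive_avg_pool2d(mask_semantic_feat,
                                               mask_feats.shape[-2:])
fused = mask_feats + mask_semantic_feat
assert fused.shape == (4, 256, 14, 14)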
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/pisa_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.core import bbox2roi from ..builder import HEADS from ..losses.pisa_loss import carl_loss, isr_p from .standard_roi_head import StandardRoIHead @HEADS.register_module() class PISARoIHead(StandardRoIHead): r"""The RoI head for `Prime Sample Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.""" def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): """Forward function for training. Args: x (list[Tensor]): List of multi-level img features. img_metas (list[dict]): List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. proposals (list[Tensors]): List of region proposals. gt_bboxes (list[Tensor]): Each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): Class indices corresponding to each box gt_bboxes_ignore (list[Tensor], optional): Specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : True segmentation masks for each box used if the architecture supports a segmentation task. Returns: dict[str, Tensor]: a dictionary of loss components """ # assign gts and sample proposals if self.with_bbox or self.with_mask: num_imgs = len(img_metas) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] neg_label_weights = [] for i in range(num_imgs): assign_result = self.bbox_assigner.assign( proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = self.bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) # neg label weight is obtained by sampling when using ISR-N neg_label_weight = None if isinstance(sampling_result, tuple): sampling_result, neg_label_weight = sampling_result sampling_results.append(sampling_result) neg_label_weights.append(neg_label_weight) losses = dict() # bbox head forward and loss if self.with_bbox: bbox_results = self._bbox_forward_train( x, sampling_results, gt_bboxes, gt_labels, img_metas, neg_label_weights=neg_label_weights) losses.update(bbox_results['loss_bbox']) # mask head forward and loss if self.with_mask: mask_results = self._mask_forward_train(x, sampling_results, bbox_results['bbox_feats'], gt_masks, img_metas) losses.update(mask_results['loss_mask']) return losses def _bbox_forward(self, x, rois): """Box forward function used in both training and testing.""" # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) return bbox_results def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas, neg_label_weights=None): """Run forward function and calculate loss for box head in training.""" rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_results = self._bbox_forward(x, rois) bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, gt_labels, self.train_cfg) # neg_label_weights obtained by sampler is image-wise, mapping back to # the corresponding location in label 
weights if neg_label_weights[0] is not None: label_weights = bbox_targets[1] cur_num_rois = 0 for i in range(len(sampling_results)): num_pos = sampling_results[i].pos_inds.size(0) num_neg = sampling_results[i].neg_inds.size(0) label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos + num_neg] = neg_label_weights[i] cur_num_rois += num_pos + num_neg cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] # Apply ISR-P isr_cfg = self.train_cfg.get('isr', None) if isr_cfg is not None: bbox_targets = isr_p( cls_score, bbox_pred, bbox_targets, rois, sampling_results, self.bbox_head.loss_cls, self.bbox_head.bbox_coder, **isr_cfg, num_class=self.bbox_head.num_classes) loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois, *bbox_targets) # Add CARL Loss carl_cfg = self.train_cfg.get('carl', None) if carl_cfg is not None: loss_carl = carl_loss( cls_score, bbox_targets[0], bbox_pred, bbox_targets[2], self.bbox_head.loss_bbox, **carl_cfg, num_class=self.bbox_head.num_classes) loss_bbox.update(loss_carl) bbox_results.update(loss_bbox=loss_bbox) return bbox_results
6,656
40.347826
79
py
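`_bbox_forward_train` above maps the sampler's image-wise negative label weights back into the concatenated label-weight tensor by walking the per-image [positives..., negatives...] layout. A standalone sketch of that bookkeeping with hypothetical counts:

import torch

# Hypothetical sampling result for a batch of two images:
# per image the sampled RoIs are ordered [positives..., negatives...].
num_pos = [3, 2]
num_neg = [4, 5]
neg_label_weights = [torch.full((4,), 0.5), torch.full((5,), 0.8)]

# Concatenated label weights for all 3+4+2+5 = 14 RoIs, as produced by get_targets.
label_weights = torch.ones(sum(num_pos) + sum(num_neg))

# Walk image by image and overwrite only the negative slots.
cur = 0
for p, n, w in zip(num_pos, num_neg, neg_label_weights):
    label_weights[cur + p:cur + p + n] = w
    cur += p + n
# Positions 3..6 now hold 0.5, positions 9..13 hold 0.8; positives keep weight 1.0.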
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/test_mixins.py
# Copyright (c) OpenMMLab. All rights reserved. import sys import warnings import numpy as np import torch from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms) if sys.version_info >= (3, 7): from mmdet.utils.contextmanagers import completed class BBoxTestMixin: if sys.version_info >= (3, 7): async def async_test_bboxes(self, x, img_metas, proposals, rcnn_test_cfg, rescale=False, **kwargs): """Asynchronized test for box head without augmentation.""" rois = bbox2roi(proposals) roi_feats = self.bbox_roi_extractor( x[:len(self.bbox_roi_extractor.featmap_strides)], rois) if self.with_shared_head: roi_feats = self.shared_head(roi_feats) sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017) async with completed( __name__, 'bbox_head_forward', sleep_interval=sleep_interval): cls_score, bbox_pred = self.bbox_head(roi_feats) img_shape = img_metas[0]['img_shape'] scale_factor = img_metas[0]['scale_factor'] det_bboxes, det_labels = self.bbox_head.get_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) return det_bboxes, det_labels def simple_test_bboxes(self, x, img_metas, proposals, rcnn_test_cfg, rescale=False): """Test only det bboxes without augmentation. Args: x (tuple[Tensor]): Feature maps of all scale level. img_metas (list[dict]): Image meta info. proposals (List[Tensor]): Region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Default: False. Returns: tuple[list[Tensor], list[Tensor]]: The first list contains the boxes of the corresponding image in a batch, each tensor has the shape (num_boxes, 5) and last dimension 5 represent (tl_x, tl_y, br_x, br_y, score). Each Tensor in the second list is the labels with shape (num_boxes, ). The length of both lists should be equal to batch_size. 
""" rois = bbox2roi(proposals) if rois.shape[0] == 0: batch_size = len(proposals) det_bbox = rois.new_zeros(0, 5) det_label = rois.new_zeros((0, ), dtype=torch.long) if rcnn_test_cfg is None: det_bbox = det_bbox[:, :4] det_label = rois.new_zeros( (0, self.bbox_head.fc_cls.out_features)) # There is no proposal in the whole batch return [det_bbox] * batch_size, [det_label] * batch_size bbox_results = self._bbox_forward(x, rois) img_shapes = tuple(meta['img_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) # split batch bbox prediction back to each image cls_score = bbox_results['cls_score'] bbox_pred = bbox_results['bbox_pred'] num_proposals_per_img = tuple(len(p) for p in proposals) rois = rois.split(num_proposals_per_img, 0) cls_score = cls_score.split(num_proposals_per_img, 0) # some detector with_reg is False, bbox_pred will be None if bbox_pred is not None: # TODO move this to a sabl_roi_head # the bbox prediction of some detectors like SABL is not Tensor if isinstance(bbox_pred, torch.Tensor): bbox_pred = bbox_pred.split(num_proposals_per_img, 0) else: bbox_pred = self.bbox_head.bbox_pred_split( bbox_pred, num_proposals_per_img) else: bbox_pred = (None, ) * len(proposals) # apply bbox post-processing to each image individually det_bboxes = [] det_labels = [] for i in range(len(proposals)): if rois[i].shape[0] == 0: # There is no proposal in the single image det_bbox = rois[i].new_zeros(0, 5) det_label = rois[i].new_zeros((0, ), dtype=torch.long) if rcnn_test_cfg is None: det_bbox = det_bbox[:, :4] det_label = rois[i].new_zeros( (0, self.bbox_head.fc_cls.out_features)) else: det_bbox, det_label = self.bbox_head.get_bboxes( rois[i], cls_score[i], bbox_pred[i], img_shapes[i], scale_factors[i], rescale=rescale, cfg=rcnn_test_cfg) det_bboxes.append(det_bbox) det_labels.append(det_label) return det_bboxes, det_labels def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): """Test det bboxes with test time augmentation.""" aug_bboxes = [] aug_scores = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] # TODO more flexible proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip, flip_direction) rois = bbox2roi([proposals]) bbox_results = self._bbox_forward(x, rois) bboxes, scores = self.bbox_head.get_bboxes( rois, bbox_results['cls_score'], bbox_results['bbox_pred'], img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) if merged_bboxes.shape[0] == 0: # There is no proposal in the single image det_bboxes = merged_bboxes.new_zeros(0, 5) det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long) else: det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) return det_bboxes, det_labels class MaskTestMixin: if sys.version_info >= (3, 7): async def async_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False, mask_test_cfg=None): """Asynchronized test for mask head without augmentation.""" # image shape of the first image in the batch (only one) ori_shape = img_metas[0]['ori_shape'] scale_factor = 
img_metas[0]['scale_factor'] if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes)] else: if rescale and not isinstance(scale_factor, (float, torch.Tensor)): scale_factor = det_bboxes.new_tensor(scale_factor) _bboxes = ( det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) mask_feats = self.mask_roi_extractor( x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'): sleep_interval = mask_test_cfg['async_sleep_interval'] else: sleep_interval = 0.035 async with completed( __name__, 'mask_head_forward', sleep_interval=sleep_interval): mask_pred = self.mask_head(mask_feats) segm_result = self.mask_head.get_seg_masks( mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape, scale_factor, rescale) return segm_result def simple_test_mask(self, x, img_metas, det_bboxes, det_labels, rescale=False): """Simple test for mask head without augmentation.""" # image shapes of images in the batch ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) if isinstance(scale_factors[0], float): warnings.warn( 'Scale factor in img_metas should be a ' 'ndarray with shape (4,) ' 'arrange as (factor_w, factor_h, factor_w, factor_h), ' 'The scale_factor with float type has been deprecated. ') scale_factors = np.array([scale_factors] * 4, dtype=np.float32) num_imgs = len(det_bboxes) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): segm_results = [[[] for _ in range(self.mask_head.num_classes)] for _ in range(num_imgs)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. 
if rescale: scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] mask_rois = bbox2roi(_bboxes) mask_results = self._mask_forward(x, mask_rois) mask_pred = mask_results['mask_pred'] # split batch mask prediction back to each image num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] mask_preds = mask_pred.split(num_mask_roi_per_img, 0) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) else: segm_result = self.mask_head.get_seg_masks( mask_preds[i], _bboxes[i], det_labels[i], self.test_cfg, ori_shapes[i], scale_factors[i], rescale) segm_results.append(segm_result) return segm_results def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): """Test for mask head with test time augmentation.""" if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes)] else: aug_masks = [] for x, img_meta in zip(feats, img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] flip_direction = img_meta[0]['flip_direction'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip, flip_direction) mask_rois = bbox2roi([_bboxes]) mask_results = self._mask_forward(x, mask_rois) # convert to numpy array to save memory aug_masks.append( mask_results['mask_pred'].sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) ori_shape = img_metas[0][0]['ori_shape'] scale_factor = det_bboxes.new_ones(4) segm_result = self.mask_head.get_seg_masks( merged_masks, det_bboxes, det_labels, self.test_cfg, ori_shape, scale_factor=scale_factor, rescale=False) return segm_result
13,557
42.455128
79
py
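The mixins above repeatedly pack per-image boxes into a single (n, 5) RoI tensor whose first column is the image index, run one batched forward, then split the predictions back by proposal count. A standalone sketch of both steps (the packing mirrors the layout `bbox2roi` produces, as described in the extractor docstrings below; it is not the actual implementation):

import torch

# Hypothetical per-image detections (x1, y1, x2, y2) for a 2-image batch.
boxes_img0 = torch.tensor([[10., 20., 50., 80.], [30., 30., 60., 90.]])
boxes_img1 = torch.tensor([[5., 15., 40., 70.]])
box_list = [boxes_img0, boxes_img1]

# Prepend the image index so one forward pass serves the whole batch.
rois = torch.cat([
    torch.cat([torch.full((len(b), 1), float(i)), b], dim=1)
    for i, b in enumerate(box_list)
])

# Mirror step used throughout the mixins: split batched predictions per image.
num_proposals_per_img = tuple(len(b) for b in box_list)
fake_scores = torch.rand(rois.size(0), 81)
scores_per_img = fake_scores.split(num_proposals_per_img, 0)
assert [s.size(0) for s in scores_per_img] == [2, 1]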
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch import torch.nn as nn from mmcv import ops from mmcv.runner import BaseModule class BaseRoIExtractor(BaseModule, metaclass=ABCMeta): """Base class for RoI extractor. Args: roi_layer (dict): Specify RoI layer type and arguments. out_channels (int): Output channels of RoI layers. featmap_strides (int): Strides of input feature maps. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, roi_layer, out_channels, featmap_strides, init_cfg=None): super(BaseRoIExtractor, self).__init__(init_cfg) self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides) self.out_channels = out_channels self.featmap_strides = featmap_strides self.fp16_enabled = False @property def num_inputs(self): """int: Number of input feature maps.""" return len(self.featmap_strides) def build_roi_layers(self, layer_cfg, featmap_strides): """Build RoI operator to extract feature from each level feature map. Args: layer_cfg (dict): Dictionary to construct and config RoI layer operation. Options are modules under ``mmcv/ops`` such as ``RoIAlign``. featmap_strides (List[int]): The stride of input feature map w.r.t to the original image size, which would be used to scale RoI coordinate (original image coordinate system) to feature coordinate system. Returns: nn.ModuleList: The RoI extractor modules for each level feature map. """ cfg = layer_cfg.copy() layer_type = cfg.pop('type') assert hasattr(ops, layer_type) layer_cls = getattr(ops, layer_type) roi_layers = nn.ModuleList( [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides]) return roi_layers def roi_rescale(self, rois, scale_factor): """Scale RoI coordinates by scale factor. Args: rois (torch.Tensor): RoI (Region of Interest), shape (n, 5) scale_factor (float): Scale factor that RoI will be multiplied by. Returns: torch.Tensor: Scaled RoI. """ cx = (rois[:, 1] + rois[:, 3]) * 0.5 cy = (rois[:, 2] + rois[:, 4]) * 0.5 w = rois[:, 3] - rois[:, 1] h = rois[:, 4] - rois[:, 2] new_w = w * scale_factor new_h = h * scale_factor x1 = cx - new_w * 0.5 x2 = cx + new_w * 0.5 y1 = cy - new_h * 0.5 y2 = cy + new_h * 0.5 new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1) return new_rois @abstractmethod def forward(self, feats, rois, roi_scale_factor=None): pass
3,002
32.741573
78
py
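`roi_rescale` above grows each RoI about its centre while keeping the batch index untouched. A worked one-RoI example reproducing the same arithmetic:

import torch

# One RoI in (batch_idx, x1, y1, x2, y2) format, rescaled by 1.5 around its centre.
roi = torch.tensor([[0., 20., 40., 40., 60.]])
scale = 1.5
cx, cy = (roi[:, 1] + roi[:, 3]) * 0.5, (roi[:, 2] + roi[:, 4]) * 0.5
w, h = roi[:, 3] - roi[:, 1], roi[:, 4] - roi[:, 2]
new_roi = torch.stack((roi[:, 0],
                       cx - w * scale * 0.5, cy - h * scale * 0.5,
                       cx + w * scale * 0.5, cy + h * scale * 0.5), dim=-1)
# -> [[0., 15., 35., 45., 65.]]: same centre, 1.5x the width and height.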
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/roi_extractors/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .base_roi_extractor import BaseRoIExtractor from .generic_roi_extractor import GenericRoIExtractor from .single_level_roi_extractor import SingleRoIExtractor __all__ = ['BaseRoIExtractor', 'SingleRoIExtractor', 'GenericRoIExtractor']
288
40.285714
75
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.runner import force_fp32 from mmdet.models.builder import ROI_EXTRACTORS from .base_roi_extractor import BaseRoIExtractor @ROI_EXTRACTORS.register_module() class SingleRoIExtractor(BaseRoIExtractor): """Extract RoI features from a single level feature map. If there are multiple input feature levels, each RoI is mapped to a level according to its scale. The mapping rule is proposed in `FPN <https://arxiv.org/abs/1612.03144>`_. Args: roi_layer (dict): Specify RoI layer type and arguments. out_channels (int): Output channels of RoI layers. featmap_strides (List[int]): Strides of input feature maps. finest_scale (int): Scale threshold of mapping to level 0. Default: 56. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, roi_layer, out_channels, featmap_strides, finest_scale=56, init_cfg=None): super(SingleRoIExtractor, self).__init__(roi_layer, out_channels, featmap_strides, init_cfg) self.finest_scale = finest_scale def map_roi_levels(self, rois, num_levels): """Map rois to corresponding feature levels by scales. - scale < finest_scale * 2: level 0 - finest_scale * 2 <= scale < finest_scale * 4: level 1 - finest_scale * 4 <= scale < finest_scale * 8: level 2 - scale >= finest_scale * 8: level 3 Args: rois (Tensor): Input RoIs, shape (k, 5). num_levels (int): Total level number. Returns: Tensor: Level index (0-based) of each RoI, shape (k, ) """ scale = torch.sqrt( (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2])) target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6)) target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long() return target_lvls @force_fp32(apply_to=('feats', ), out_fp16=True) def forward(self, feats, rois, roi_scale_factor=None): """Forward function.""" out_size = self.roi_layers[0].output_size num_levels = len(feats) expand_dims = (-1, self.out_channels * out_size[0] * out_size[1]) if torch.onnx.is_in_onnx_export(): # Work around to export mask-rcnn to onnx roi_feats = rois[:, :1].clone().detach() roi_feats = roi_feats.expand(*expand_dims) roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size) roi_feats = roi_feats * 0 else: roi_feats = feats[0].new_zeros( rois.size(0), self.out_channels, *out_size) # TODO: remove this when parrots supports if torch.__version__ == 'parrots': roi_feats.requires_grad = True if num_levels == 1: if len(rois) == 0: return roi_feats return self.roi_layers[0](feats[0], rois) target_lvls = self.map_roi_levels(rois, num_levels) if roi_scale_factor is not None: rois = self.roi_rescale(rois, roi_scale_factor) for i in range(num_levels): mask = target_lvls == i if torch.onnx.is_in_onnx_export(): # To keep all roi_align nodes exported to onnx # and skip nonzero op mask = mask.float().unsqueeze(-1) # select target level rois and reset the rest rois to zero. rois_i = rois.clone().detach() rois_i *= mask mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape) roi_feats_t = self.roi_layers[i](feats[i], rois_i) roi_feats_t *= mask_exp roi_feats += roi_feats_t continue inds = mask.nonzero(as_tuple=False).squeeze(1) if inds.numel() > 0: rois_ = rois[inds] roi_feats_t = self.roi_layers[i](feats[i], rois_) roi_feats[inds] = roi_feats_t else: # Sometimes some pyramid levels will not be used for RoI # feature extraction and this will cause an incomplete # computation graph in one GPU, which is different from those # in other GPUs and will cause a hanging error. 
# Therefore, we add it to ensure each feature pyramid is # included in the computation graph to avoid runtime bugs. roi_feats += sum( x.view(-1)[0] for x in self.parameters()) * 0. + feats[i].sum() * 0. return roi_feats
4,829
40.637931
79
py
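The level-mapping rule documented in `map_roi_levels` above assigns each RoI to an FPN level from its box scale. A worked example with `finest_scale=56` and four levels:

import torch

# level = clamp(floor(log2(sqrt(w * h) / 56)), 0, num_levels - 1)
rois = torch.tensor([
    [0., 0., 0.,  40.,  40.],   # scale 40  -> level 0 (< 112)
    [0., 0., 0., 150., 150.],   # scale 150 -> level 1 (112 <= scale < 224)
    [0., 0., 0., 300., 300.],   # scale 300 -> level 2 (224 <= scale < 448)
    [0., 0., 0., 600., 600.],   # scale 600 -> level 3 (>= 448)
])
finest_scale, num_levels = 56, 4
scale = torch.sqrt((rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
print(target_lvls)  # tensor([0, 1, 2, 3])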
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn.bricks import build_plugin_layer from mmcv.runner import force_fp32 from mmdet.models.builder import ROI_EXTRACTORS from .base_roi_extractor import BaseRoIExtractor @ROI_EXTRACTORS.register_module() class GenericRoIExtractor(BaseRoIExtractor): """Extract RoI features from all level feature maps levels. This is the implementation of `A novel Region of Interest Extraction Layer for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_. Args: aggregation (str): The method to aggregate multiple feature maps. Options are 'sum', 'concat'. Default: 'sum'. pre_cfg (dict | None): Specify pre-processing modules. Default: None. post_cfg (dict | None): Specify post-processing modules. Default: None. kwargs (keyword arguments): Arguments that are the same as :class:`BaseRoIExtractor`. """ def __init__(self, aggregation='sum', pre_cfg=None, post_cfg=None, **kwargs): super(GenericRoIExtractor, self).__init__(**kwargs) assert aggregation in ['sum', 'concat'] self.aggregation = aggregation self.with_post = post_cfg is not None self.with_pre = pre_cfg is not None # build pre/post processing modules if self.with_post: self.post_module = build_plugin_layer(post_cfg, '_post_module')[1] if self.with_pre: self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1] @force_fp32(apply_to=('feats', ), out_fp16=True) def forward(self, feats, rois, roi_scale_factor=None): """Forward function.""" if len(feats) == 1: return self.roi_layers[0](feats[0], rois) out_size = self.roi_layers[0].output_size num_levels = len(feats) roi_feats = feats[0].new_zeros( rois.size(0), self.out_channels, *out_size) # some times rois is an empty tensor if roi_feats.shape[0] == 0: return roi_feats if roi_scale_factor is not None: rois = self.roi_rescale(rois, roi_scale_factor) # mark the starting channels for concat mode start_channels = 0 for i in range(num_levels): roi_feats_t = self.roi_layers[i](feats[i], rois) end_channels = start_channels + roi_feats_t.size(1) if self.with_pre: # apply pre-processing to a RoI extracted from each layer roi_feats_t = self.pre_module(roi_feats_t) if self.aggregation == 'sum': # and sum them all roi_feats += roi_feats_t else: # and concat them along channel dimension roi_feats[:, start_channels:end_channels] = roi_feats_t # update channels starting position start_channels = end_channels # check if concat channels match at the end if self.aggregation == 'concat': assert start_channels == self.out_channels if self.with_post: # apply post-processing before return the result roi_feats = self.post_module(roi_feats) return roi_feats
3,271
37.494118
79
py
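The two aggregation modes above differ only in how the per-level pooled RoI features are combined. A standalone sketch with hypothetical sizes ('sum' keeps the channel count, 'concat' stacks the levels along channels):

import torch

# Hypothetical pooled features from two levels: 3 RoIs, 4 channels, 7x7 output.
level0 = torch.rand(3, 4, 7, 7)
level1 = torch.rand(3, 4, 7, 7)

agg_sum = level0 + level1                        # aggregation='sum': 4 channels
agg_concat = torch.cat([level0, level1], dim=1)  # aggregation='concat': 8 channels
assert agg_sum.shape[1] == 4 and agg_concat.shape[1] == 8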
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.models.builder import HEADS from .convfc_bbox_head import ConvFCBBoxHead @HEADS.register_module() class SCNetBBoxHead(ConvFCBBoxHead): """BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_. This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us to get intermediate shared feature. """ def _forward_shared(self, x): """Forward function for shared part.""" if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) return x def _forward_cls_reg(self, x): """Forward function for classification and regression parts.""" x_cls = x x_reg = x for conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred = self.fc_reg(x_reg) if self.with_reg else None return cls_score, bbox_pred def forward(self, x, return_shared_feat=False): """Forward function. Args: x (Tensor): input features return_shared_feat (bool): If True, return cls-reg-shared feature. Return: out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``, if ``return_shared_feat`` is True, append ``x_shared`` to the returned tuple. """ x_shared = self._forward_shared(x) out = self._forward_cls_reg(x_shared) if return_shared_feat: out += (x_shared, ) return out
2,307
28.589744
79
py
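`SCNetBBoxHead.forward` above optionally appends the cls/reg-shared feature to the usual (cls_score, bbox_pred) tuple. A toy module with the same calling convention (placeholder layers, not the actual head) showing how the extra return value is consumed:

import torch
import torch.nn as nn


class ToySharedHead(nn.Module):
    """Illustrative only: shared trunk runs once, shared feature optionally returned."""

    def __init__(self):
        super().__init__()
        self.shared = nn.Linear(16, 8)
        self.cls = nn.Linear(8, 3)
        self.reg = nn.Linear(8, 4)

    def forward(self, x, return_shared_feat=False):
        x_shared = torch.relu(self.shared(x))
        out = (self.cls(x_shared), self.reg(x_shared))
        if return_shared_feat:
            out += (x_shared, )
        return out


head = ToySharedHead()
cls_score, bbox_pred = head(torch.rand(5, 16))
# With the flag set, a later stage can reuse the shared RoI feature, as SCNet does.
cls_score, bbox_pred, x_shared = head(torch.rand(5, 16), return_shared_feat=True)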
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/bbox_heads/bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmcv.runner import BaseModule, auto_fp16, force_fp32 from torch.nn.modules.utils import _pair from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms from mmdet.models.builder import HEADS, build_loss from mmdet.models.losses import accuracy from mmdet.models.utils import build_linear_layer @HEADS.register_module() class BBoxHead(BaseModule): """Simplest RoI head, with only two fc layers for classification and regression respectively.""" def __init__(self, with_avg_pool=False, with_cls=True, with_reg=True, roi_feat_size=7, in_channels=256, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=True, target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, reg_decoded_bbox=False, reg_predictor_cfg=dict(type='Linear'), cls_predictor_cfg=dict(type='Linear'), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.0), init_cfg=None): super(BBoxHead, self).__init__(init_cfg) assert with_cls or with_reg self.with_avg_pool = with_avg_pool self.with_cls = with_cls self.with_reg = with_reg self.roi_feat_size = _pair(roi_feat_size) self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1] self.in_channels = in_channels self.num_classes = num_classes self.reg_class_agnostic = reg_class_agnostic self.reg_decoded_bbox = reg_decoded_bbox self.reg_predictor_cfg = reg_predictor_cfg self.cls_predictor_cfg = cls_predictor_cfg self.fp16_enabled = False self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) in_channels = self.in_channels if self.with_avg_pool: self.avg_pool = nn.AvgPool2d(self.roi_feat_size) else: in_channels *= self.roi_feat_area if self.with_cls: # need to add background class if self.custom_cls_channels: cls_channels = self.loss_cls.get_cls_channels(self.num_classes) else: cls_channels = num_classes + 1 self.fc_cls = build_linear_layer( self.cls_predictor_cfg, in_features=in_channels, out_features=cls_channels) if self.with_reg: out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes self.fc_reg = build_linear_layer( self.reg_predictor_cfg, in_features=in_channels, out_features=out_dim_reg) self.debug_imgs = None if init_cfg is None: self.init_cfg = [] if self.with_cls: self.init_cfg += [ dict( type='Normal', std=0.01, override=dict(name='fc_cls')) ] if self.with_reg: self.init_cfg += [ dict( type='Normal', std=0.001, override=dict(name='fc_reg')) ] @property def custom_cls_channels(self): return getattr(self.loss_cls, 'custom_cls_channels', False) @property def custom_activation(self): return getattr(self.loss_cls, 'custom_activation', False) @property def custom_accuracy(self): return getattr(self.loss_cls, 'custom_accuracy', False) @auto_fp16() def forward(self, x): if self.with_avg_pool: if x.numel() > 0: x = self.avg_pool(x) x = x.view(x.size(0), -1) else: # avg_pool does not support empty tensor, # so use torch.mean instead it x = torch.mean(x, dim=(-1, -2)) cls_score = self.fc_cls(x) if self.with_cls else None bbox_pred = self.fc_reg(x) if self.with_reg else None return cls_score, bbox_pred def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes, pos_gt_labels, cfg): """Calculate the ground truth for proposals in the single image according to the sampling results. 
Args: pos_bboxes (Tensor): Contains all the positive boxes, has shape (num_pos, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. neg_bboxes (Tensor): Contains all the negative boxes, has shape (num_neg, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. pos_gt_bboxes (Tensor): Contains gt_boxes for all positive samples, has shape (num_pos, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. pos_gt_labels (Tensor): Contains gt_labels for all positive samples, has shape (num_pos, ). cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following Tensors: - labels(Tensor): Gt_labels for all proposals, has shape (num_proposals,). - label_weights(Tensor): Labels_weights for all proposals, has shape (num_proposals,). - bbox_targets(Tensor):Regression target for all proposals, has shape (num_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights(Tensor):Regression weights for all proposals, has shape (num_proposals, 4). """ num_pos = pos_bboxes.size(0) num_neg = neg_bboxes.size(0) num_samples = num_pos + num_neg # original implementation uses new_zeros since BG are set to be 0 # now use empty & fill because BG cat_id = num_classes, # FG cat_id = [0, num_classes-1] labels = pos_bboxes.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_bboxes.new_zeros(num_samples) bbox_targets = pos_bboxes.new_zeros(num_samples, 4) bbox_weights = pos_bboxes.new_zeros(num_samples, 4) if num_pos > 0: labels[:num_pos] = pos_gt_labels pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight label_weights[:num_pos] = pos_weight if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( pos_bboxes, pos_gt_bboxes) else: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, both # the predicted boxes and regression targets should be with # absolute coordinate format. pos_bbox_targets = pos_gt_bboxes bbox_targets[:num_pos, :] = pos_bbox_targets bbox_weights[:num_pos, :] = 1 if num_neg > 0: label_weights[-num_neg:] = 1.0 return labels, label_weights, bbox_targets, bbox_weights def get_targets(self, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, concat=True): """Calculate the ground truth for all samples in a batch according to the sampling_results. Almost the same as the implementation in bbox_head, we passed additional parameters pos_inds_list and neg_inds_list to `_get_target_single` function. Args: sampling_results (List[obj:SamplingResults]): Assign results of all images in a batch after sampling. gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch, each tensor has shape (num_gt, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. gt_labels (list[Tensor]): Gt_labels of all images in a batch, each tensor has shape (num_gt,). rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. concat (bool): Whether to concatenate the results of all the images in a single batch. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following list of Tensors: - labels (list[Tensor],Tensor): Gt_labels for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). 
- label_weights (list[Tensor]): Labels_weights for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - bbox_targets (list[Tensor],Tensor): Regression target for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights (list[tensor],Tensor): Regression weights for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4). """ pos_bboxes_list = [res.pos_bboxes for res in sampling_results] neg_bboxes_list = [res.neg_bboxes for res in sampling_results] pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] labels, label_weights, bbox_targets, bbox_weights = multi_apply( self._get_target_single, pos_bboxes_list, neg_bboxes_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=rcnn_train_cfg) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights @force_fp32(apply_to=('cls_score', 'bbox_pred')) def loss(self, cls_score, bbox_pred, rois, labels, label_weights, bbox_targets, bbox_weights, reduction_override=None): losses = dict() if cls_score is not None: avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) if cls_score.numel() > 0: loss_cls_ = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) if isinstance(loss_cls_, dict): losses.update(loss_cls_) else: losses['loss_cls'] = loss_cls_ if self.custom_activation: acc_ = self.loss_cls.get_accuracy(cls_score, labels) losses.update(acc_) else: losses['acc'] = accuracy(cls_score, labels) if bbox_pred is not None: bg_class_ind = self.num_classes # 0~self.num_classes-1 are FG, self.num_classes is BG pos_inds = (labels >= 0) & (labels < bg_class_ind) # do not perform bounding box regression for BG anymore. if pos_inds.any(): if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, # `GIouLoss`, `DIouLoss`) is applied directly on # the decoded bounding boxes, it decodes the # already encoded coordinates to absolute format. bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred) if self.reg_class_agnostic: pos_bbox_pred = bbox_pred.view( bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] else: pos_bbox_pred = bbox_pred.view( bbox_pred.size(0), -1, 4)[pos_inds.type(torch.bool), labels[pos_inds.type(torch.bool)]] losses['loss_bbox'] = self.loss_bbox( pos_bbox_pred, bbox_targets[pos_inds.type(torch.bool)], bbox_weights[pos_inds.type(torch.bool)], avg_factor=bbox_targets.size(0), reduction_override=reduction_override) else: losses['loss_bbox'] = bbox_pred[pos_inds].sum() return losses @force_fp32(apply_to=('cls_score', 'bbox_pred')) def get_bboxes(self, rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None): """Transform network output for a batch into bbox predictions. Args: rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tensor, optional): Box energies / deltas. 
has shape (num_boxes, num_classes * 4). img_shape (Sequence[int], optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). scale_factor (ndarray): Scale factor of the image arrange as (w_scale, h_scale, w_scale, h_scale). rescale (bool): If True, return boxes in original image space. Default: False. cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None Returns: tuple[Tensor, Tensor]: First tensor is `det_bboxes`, has the shape (num_boxes, 5) and last dimension 5 represent (tl_x, tl_y, br_x, br_y, score). Second tensor is the labels with shape (num_boxes, ). """ # some loss (Seesaw loss..) may have custom activation if self.custom_cls_channels: scores = self.loss_cls.get_activation(cls_score) else: scores = F.softmax( cls_score, dim=-1) if cls_score is not None else None # bbox_pred would be None in some detector when with_reg is False, # e.g. Grid R-CNN. if bbox_pred is not None: bboxes = self.bbox_coder.decode( rois[..., 1:], bbox_pred, max_shape=img_shape) else: bboxes = rois[:, 1:].clone() if img_shape is not None: bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1]) bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0]) if rescale and bboxes.size(0) > 0: scale_factor = bboxes.new_tensor(scale_factor) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if cfg is None: return bboxes, scores else: det_bboxes, det_labels = multiclass_nms(bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels @force_fp32(apply_to=('bbox_preds', )) def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): """Refine bboxes during training. Args: rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, and bs is the sampled RoIs per image. The first column is the image id and the next 4 columns are x1, y1, x2, y2. labels (Tensor): Shape (n*bs, ). bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class). pos_is_gts (list[Tensor]): Flags indicating if each positive bbox is a gt bbox. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Refined bboxes of each image in a mini-batch. Example: >>> # xdoctest: +REQUIRES(module:kwarray) >>> import kwarray >>> import numpy as np >>> from mmdet.core.bbox.demodata import random_boxes >>> self = BBoxHead(reg_class_agnostic=True) >>> n_roi = 2 >>> n_img = 4 >>> scale = 512 >>> rng = np.random.RandomState(0) >>> img_metas = [{'img_shape': (scale, scale)} ... for _ in range(n_img)] >>> # Create rois in the expected format >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng) >>> img_ids = torch.randint(0, n_img, (n_roi,)) >>> img_ids = img_ids.float() >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1) >>> # Create other args >>> labels = torch.randint(0, 2, (n_roi,)).long() >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng) >>> # For each image, pretend random positive boxes are gts >>> is_label_pos = (labels.numpy() > 0).astype(np.int) >>> lbl_per_img = kwarray.group_items(is_label_pos, ... img_ids.numpy()) >>> pos_per_img = [sum(lbl_per_img.get(gid, [])) ... 
for gid in range(n_img)] >>> pos_is_gts = [ >>> torch.randint(0, 2, (npos,)).byte().sort( >>> descending=True)[0] >>> for npos in pos_per_img >>> ] >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds, >>> pos_is_gts, img_metas) >>> print(bboxes_list) """ img_ids = rois[:, 0].long().unique(sorted=True) assert img_ids.numel() <= len(img_metas) bboxes_list = [] for i in range(len(img_metas)): inds = torch.nonzero( rois[:, 0] == i, as_tuple=False).squeeze(dim=1) num_rois = inds.numel() bboxes_ = rois[inds, 1:] label_ = labels[inds] bbox_pred_ = bbox_preds[inds] img_meta_ = img_metas[i] pos_is_gts_ = pos_is_gts[i] bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, img_meta_) # filter gt bboxes pos_keep = 1 - pos_is_gts_ keep_inds = pos_is_gts_.new_ones(num_rois) keep_inds[:len(pos_is_gts_)] = pos_keep bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) return bboxes_list @force_fp32(apply_to=('bbox_pred', )) def regress_by_class(self, rois, label, bbox_pred, img_meta): """Regress the bbox for the predicted class. Used in Cascade R-CNN. Args: rois (Tensor): Rois from `rpn_head` or last stage `bbox_head`, has shape (num_proposals, 4) or (num_proposals, 5). label (Tensor): Only used when `self.reg_class_agnostic` is False, has shape (num_proposals, ). bbox_pred (Tensor): Regression prediction of current stage `bbox_head`. When `self.reg_class_agnostic` is False, it has shape (n, num_classes * 4), otherwise it has shape (n, 4). img_meta (dict): Image meta info. Returns: Tensor: Regressed bboxes, the same shape as input rois. """ assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape) if not self.reg_class_agnostic: label = label * 4 inds = torch.stack((label, label + 1, label + 2, label + 3), 1) bbox_pred = torch.gather(bbox_pred, 1, inds) assert bbox_pred.size(1) == 4 max_shape = img_meta['img_shape'] if rois.size(1) == 4: new_rois = self.bbox_coder.decode( rois, bbox_pred, max_shape=max_shape) else: bboxes = self.bbox_coder.decode( rois[:, 1:], bbox_pred, max_shape=max_shape) new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) return new_rois def onnx_export(self, rois, cls_score, bbox_pred, img_shape, cfg=None, **kwargs): """Transform network output for a batch into bbox predictions. Args: rois (Tensor): Boxes to be transformed. Has shape (B, num_boxes, 5) cls_score (Tensor): Box scores. has shape (B, num_boxes, num_classes + 1), 1 represent the background. bbox_pred (Tensor, optional): Box energies / deltas for, has shape (B, num_boxes, num_classes * 4) when. img_shape (torch.Tensor): Shape of image. cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None Returns: tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class labels of shape [N, num_det]. """ assert rois.ndim == 3, 'Only support export two stage ' \ 'model to ONNX ' \ 'with batch dimension. 
' if self.custom_cls_channels: scores = self.loss_cls.get_activation(cls_score) else: scores = F.softmax( cls_score, dim=-1) if cls_score is not None else None if bbox_pred is not None: bboxes = self.bbox_coder.decode( rois[..., 1:], bbox_pred, max_shape=img_shape) else: bboxes = rois[..., 1:].clone() if img_shape is not None: max_shape = bboxes.new_tensor(img_shape)[..., :2] min_xy = bboxes.new_tensor(0) max_xy = torch.cat( [max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment from mmdet.core.export import add_dummy_nms_for_onnx max_output_boxes_per_class = cfg.nms.get('max_output_boxes_per_class', cfg.max_per_img) iou_threshold = cfg.nms.get('iou_threshold', 0.5) score_threshold = cfg.score_thr nms_pre = cfg.get('deploy_nms_pre', -1) scores = scores[..., :self.num_classes] if self.reg_class_agnostic: return add_dummy_nms_for_onnx( bboxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, pre_top_k=nms_pre, after_top_k=cfg.max_per_img) else: batch_size = scores.shape[0] labels = torch.arange( self.num_classes, dtype=torch.long).to(scores.device) labels = labels.view(1, 1, -1).expand_as(scores) labels = labels.reshape(batch_size, -1) scores = scores.reshape(batch_size, -1) bboxes = bboxes.reshape(batch_size, -1, 4) max_size = torch.max(img_shape) # Offset bboxes of each class so that bboxes of different labels # do not overlap. offsets = (labels * max_size + 1).unsqueeze(2) bboxes_for_nms = bboxes + offsets batch_dets, labels = add_dummy_nms_for_onnx( bboxes_for_nms, scores.unsqueeze(2), max_output_boxes_per_class, iou_threshold, score_threshold, pre_top_k=nms_pre, after_top_k=cfg.max_per_img, labels=labels) # Offset the bboxes back after dummy nms. offsets = (labels * max_size + 1).unsqueeze(2) # Indexing + inplace operation fails with dynamic shape in ONNX # original style: batch_dets[..., :4] -= offsets bboxes, scores = batch_dets[..., 0:4], batch_dets[..., 4:5] bboxes -= offsets batch_dets = torch.cat([bboxes, scores], dim=2) return batch_dets, labels
25,657
42.122689
79
py
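Note: a minimal usage sketch for the BBoxHead defined in bbox_head.py above, assuming an environment where torch and mmdet are importable; the RoI count and feature shapes are illustrative only. with_avg_pool=True is chosen so the raw 7x7 RoI features can be fed to the head directly (with the default with_avg_pool=False the fc layers expect pre-flattened features).

import torch
from mmdet.models.roi_heads.bbox_heads import BBoxHead

# Average-pool the 7x7 RoI features so the fc layers see (N, in_channels).
head = BBoxHead(with_avg_pool=True, roi_feat_size=7, in_channels=256, num_classes=80)
roi_feats = torch.rand(4, 256, 7, 7)  # 4 sampled RoIs, each pooled to 256 x 7 x 7
cls_score, bbox_pred = head(roi_feats)
print(cls_score.shape)  # torch.Size([4, 81])   80 classes + 1 background class
print(bbox_pred.shape)  # torch.Size([4, 320])  4 deltas per class (class-specific regression by default)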
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/bbox_heads/sabl_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, force_fp32 from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms from mmdet.models.builder import HEADS, build_loss from mmdet.models.losses import accuracy @HEADS.register_module() class SABLHead(BaseModule): """Side-Aware Boundary Localization (SABL) for RoI-Head. Side-Aware features are extracted by conv layers with an attention mechanism. Boundary Localization with Bucketing and Bucketing Guided Rescoring are implemented in BucketingBBoxCoder. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: cls_in_channels (int): Input channels of cls RoI feature. \ Defaults to 256. reg_in_channels (int): Input channels of reg RoI feature. \ Defaults to 256. roi_feat_size (int): Size of RoI features. Defaults to 7. reg_feat_up_ratio (int): Upsample ratio of reg features. \ Defaults to 2. reg_pre_kernel (int): Kernel of 2D conv layers before \ attention pooling. Defaults to 3. reg_post_kernel (int): Kernel of 1D conv layers after \ attention pooling. Defaults to 3. reg_pre_num (int): Number of pre convs. Defaults to 2. reg_post_num (int): Number of post convs. Defaults to 1. num_classes (int): Number of classes in dataset. Defaults to 80. cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024. reg_offset_out_channels (int): Hidden and output channel \ of reg offset branch. Defaults to 256. reg_cls_out_channels (int): Hidden and output channel \ of reg cls branch. Defaults to 256. num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1. num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0. reg_class_agnostic (bool): Class agnostic regression or not. \ Defaults to True. norm_cfg (dict): Config of norm layers. Defaults to None. bbox_coder (dict): Config of bbox coder. Defaults 'BucketingBBoxCoder'. loss_cls (dict): Config of classification loss. loss_bbox_cls (dict): Config of classification loss for bbox branch. loss_bbox_reg (dict): Config of regression loss for bbox branch. init_cfg (dict or list[dict], optional): Initialization config dict. 
Default: None """ def __init__(self, num_classes, cls_in_channels=256, reg_in_channels=256, roi_feat_size=7, reg_feat_up_ratio=2, reg_pre_kernel=3, reg_post_kernel=3, reg_pre_num=2, reg_post_num=1, cls_out_channels=1024, reg_offset_out_channels=256, reg_cls_out_channels=256, num_cls_fcs=1, num_reg_fcs=0, reg_class_agnostic=True, norm_cfg=None, bbox_coder=dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg=dict( type='SmoothL1Loss', beta=0.1, loss_weight=1.0), init_cfg=None): super(SABLHead, self).__init__(init_cfg) self.cls_in_channels = cls_in_channels self.reg_in_channels = reg_in_channels self.roi_feat_size = roi_feat_size self.reg_feat_up_ratio = int(reg_feat_up_ratio) self.num_buckets = bbox_coder['num_buckets'] assert self.reg_feat_up_ratio // 2 >= 1 self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio assert self.up_reg_feat_size == bbox_coder['num_buckets'] self.reg_pre_kernel = reg_pre_kernel self.reg_post_kernel = reg_post_kernel self.reg_pre_num = reg_pre_num self.reg_post_num = reg_post_num self.num_classes = num_classes self.cls_out_channels = cls_out_channels self.reg_offset_out_channels = reg_offset_out_channels self.reg_cls_out_channels = reg_cls_out_channels self.num_cls_fcs = num_cls_fcs self.num_reg_fcs = num_reg_fcs self.reg_class_agnostic = reg_class_agnostic assert self.reg_class_agnostic self.norm_cfg = norm_cfg self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox_cls = build_loss(loss_bbox_cls) self.loss_bbox_reg = build_loss(loss_bbox_reg) self.cls_fcs = self._add_fc_branch(self.num_cls_fcs, self.cls_in_channels, self.roi_feat_size, self.cls_out_channels) self.side_num = int(np.ceil(self.num_buckets / 2)) if self.reg_feat_up_ratio > 1: self.upsample_x = nn.ConvTranspose1d( reg_in_channels, reg_in_channels, self.reg_feat_up_ratio, stride=self.reg_feat_up_ratio) self.upsample_y = nn.ConvTranspose1d( reg_in_channels, reg_in_channels, self.reg_feat_up_ratio, stride=self.reg_feat_up_ratio) self.reg_pre_convs = nn.ModuleList() for i in range(self.reg_pre_num): reg_pre_conv = ConvModule( reg_in_channels, reg_in_channels, kernel_size=reg_pre_kernel, padding=reg_pre_kernel // 2, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_pre_convs.append(reg_pre_conv) self.reg_post_conv_xs = nn.ModuleList() for i in range(self.reg_post_num): reg_post_conv_x = ConvModule( reg_in_channels, reg_in_channels, kernel_size=(1, reg_post_kernel), padding=(0, reg_post_kernel // 2), norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_post_conv_xs.append(reg_post_conv_x) self.reg_post_conv_ys = nn.ModuleList() for i in range(self.reg_post_num): reg_post_conv_y = ConvModule( reg_in_channels, reg_in_channels, kernel_size=(reg_post_kernel, 1), padding=(reg_post_kernel // 2, 0), norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_post_conv_ys.append(reg_post_conv_y) self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1) self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1) self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1) self.relu = nn.ReLU(inplace=True) self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs, self.reg_in_channels, 1, self.reg_cls_out_channels) self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs, self.reg_in_channels, 1, self.reg_offset_out_channels) self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 
1) self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1) if init_cfg is None: self.init_cfg = [ dict( type='Xavier', layer='Linear', distribution='uniform', override=[ dict(type='Normal', name='reg_conv_att_x', std=0.01), dict(type='Normal', name='reg_conv_att_y', std=0.01), dict(type='Normal', name='fc_reg_cls', std=0.01), dict(type='Normal', name='fc_cls', std=0.01), dict(type='Normal', name='fc_reg_offset', std=0.001) ]) ] if self.reg_feat_up_ratio > 1: self.init_cfg += [ dict( type='Kaiming', distribution='normal', override=[ dict(name='upsample_x'), dict(name='upsample_y') ]) ] def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size, fc_out_channels): in_channels = in_channels * roi_feat_size * roi_feat_size branch_fcs = nn.ModuleList() for i in range(num_branch_fcs): fc_in_channels = (in_channels if i == 0 else fc_out_channels) branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels)) return branch_fcs def cls_forward(self, cls_x): cls_x = cls_x.view(cls_x.size(0), -1) for fc in self.cls_fcs: cls_x = self.relu(fc(cls_x)) cls_score = self.fc_cls(cls_x) return cls_score def attention_pool(self, reg_x): """Extract direction-specific features fx and fy with attention methanism.""" reg_fx = reg_x reg_fy = reg_x reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid() reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid() reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2) reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3) reg_fx = (reg_fx * reg_fx_att).sum(dim=2) reg_fy = (reg_fy * reg_fy_att).sum(dim=3) return reg_fx, reg_fy def side_aware_feature_extractor(self, reg_x): """Refine and extract side-aware features without split them.""" for reg_pre_conv in self.reg_pre_convs: reg_x = reg_pre_conv(reg_x) reg_fx, reg_fy = self.attention_pool(reg_x) if self.reg_post_num > 0: reg_fx = reg_fx.unsqueeze(2) reg_fy = reg_fy.unsqueeze(3) for i in range(self.reg_post_num): reg_fx = self.reg_post_conv_xs[i](reg_fx) reg_fy = self.reg_post_conv_ys[i](reg_fy) reg_fx = reg_fx.squeeze(2) reg_fy = reg_fy.squeeze(3) if self.reg_feat_up_ratio > 1: reg_fx = self.relu(self.upsample_x(reg_fx)) reg_fy = self.relu(self.upsample_y(reg_fy)) reg_fx = torch.transpose(reg_fx, 1, 2) reg_fy = torch.transpose(reg_fy, 1, 2) return reg_fx.contiguous(), reg_fy.contiguous() def reg_pred(self, x, offset_fcs, cls_fcs): """Predict bucketing estimation (cls_pred) and fine regression (offset pred) with side-aware features.""" x_offset = x.view(-1, self.reg_in_channels) x_cls = x.view(-1, self.reg_in_channels) for fc in offset_fcs: x_offset = self.relu(fc(x_offset)) for fc in cls_fcs: x_cls = self.relu(fc(x_cls)) offset_pred = self.fc_reg_offset(x_offset) cls_pred = self.fc_reg_cls(x_cls) offset_pred = offset_pred.view(x.size(0), -1) cls_pred = cls_pred.view(x.size(0), -1) return offset_pred, cls_pred def side_aware_split(self, feat): """Split side-aware features aligned with orders of bucketing targets.""" l_end = int(np.ceil(self.up_reg_feat_size / 2)) r_start = int(np.floor(self.up_reg_feat_size / 2)) feat_fl = feat[:, :l_end] feat_fr = feat[:, r_start:].flip(dims=(1, )) feat_fl = feat_fl.contiguous() feat_fr = feat_fr.contiguous() feat = torch.cat([feat_fl, feat_fr], dim=-1) return feat def bbox_pred_split(self, bbox_pred, num_proposals_per_img): """Split batch bbox prediction back to each image.""" bucket_cls_preds, bucket_offset_preds = bbox_pred bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0) bucket_offset_preds = bucket_offset_preds.split( num_proposals_per_img, 0) 
bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds)) return bbox_pred def reg_forward(self, reg_x): outs = self.side_aware_feature_extractor(reg_x) edge_offset_preds = [] edge_cls_preds = [] reg_fx = outs[0] reg_fy = outs[1] offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs, self.reg_cls_fcs) offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs, self.reg_cls_fcs) offset_pred_x = self.side_aware_split(offset_pred_x) offset_pred_y = self.side_aware_split(offset_pred_y) cls_pred_x = self.side_aware_split(cls_pred_x) cls_pred_y = self.side_aware_split(cls_pred_y) edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1) edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1) return (edge_cls_preds, edge_offset_preds) def forward(self, x): bbox_pred = self.reg_forward(x) cls_score = self.cls_forward(x) return cls_score, bbox_pred def get_targets(self, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg): pos_proposals = [res.pos_bboxes for res in sampling_results] neg_proposals = [res.neg_bboxes for res in sampling_results] pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels = [res.pos_gt_labels for res in sampling_results] cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels, rcnn_train_cfg) (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) = cls_reg_targets return (labels, label_weights, (bucket_cls_targets, bucket_offset_targets), (bucket_cls_weights, bucket_offset_weights)) def bucket_target(self, pos_proposals_list, neg_proposals_list, pos_gt_bboxes_list, pos_gt_labels_list, rcnn_train_cfg, concat=True): (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) = multi_apply( self._bucket_target_single, pos_proposals_list, neg_proposals_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=rcnn_train_cfg) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bucket_cls_targets = torch.cat(bucket_cls_targets, 0) bucket_cls_weights = torch.cat(bucket_cls_weights, 0) bucket_offset_targets = torch.cat(bucket_offset_targets, 0) bucket_offset_weights = torch.cat(bucket_offset_weights, 0) return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) def _bucket_target_single(self, pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels, cfg): """Compute bucketing estimation targets and fine regression targets for a single image. Args: pos_proposals (Tensor): positive proposals of a single image, Shape (n_pos, 4) neg_proposals (Tensor): negative proposals of a single image, Shape (n_neg, 4). pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals of a single image, Shape (n_pos, 4). pos_gt_labels (Tensor): gt labels assigned to positive proposals of a single image, Shape (n_pos, ). cfg (dict): Config of calculating targets Returns: tuple: - labels (Tensor): Labels in a single image. \ Shape (n,). - label_weights (Tensor): Label weights in a single image.\ Shape (n,) - bucket_cls_targets (Tensor): Bucket cls targets in \ a single image. Shape (n, num_buckets*2). - bucket_cls_weights (Tensor): Bucket cls weights in \ a single image. Shape (n, num_buckets*2). - bucket_offset_targets (Tensor): Bucket offset targets \ in a single image. Shape (n, num_buckets*2). - bucket_offset_targets (Tensor): Bucket offset weights \ in a single image. Shape (n, num_buckets*2). 
""" num_pos = pos_proposals.size(0) num_neg = neg_proposals.size(0) num_samples = num_pos + num_neg labels = pos_gt_bboxes.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_proposals.new_zeros(num_samples) bucket_cls_targets = pos_proposals.new_zeros(num_samples, 4 * self.side_num) bucket_cls_weights = pos_proposals.new_zeros(num_samples, 4 * self.side_num) bucket_offset_targets = pos_proposals.new_zeros( num_samples, 4 * self.side_num) bucket_offset_weights = pos_proposals.new_zeros( num_samples, 4 * self.side_num) if num_pos > 0: labels[:num_pos] = pos_gt_labels label_weights[:num_pos] = 1.0 (pos_bucket_offset_targets, pos_bucket_offset_weights, pos_bucket_cls_targets, pos_bucket_cls_weights) = self.bbox_coder.encode( pos_proposals, pos_gt_bboxes) bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights if num_neg > 0: label_weights[-num_neg:] = 1.0 return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) def loss(self, cls_score, bbox_pred, rois, labels, label_weights, bbox_targets, bbox_weights, reduction_override=None): losses = dict() if cls_score is not None: avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) losses['loss_cls'] = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) losses['acc'] = accuracy(cls_score, labels) if bbox_pred is not None: bucket_cls_preds, bucket_offset_preds = bbox_pred bucket_cls_targets, bucket_offset_targets = bbox_targets bucket_cls_weights, bucket_offset_weights = bbox_weights # edge cls bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num) bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num) bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num) losses['loss_bbox_cls'] = self.loss_bbox_cls( bucket_cls_preds, bucket_cls_targets, bucket_cls_weights, avg_factor=bucket_cls_targets.size(0), reduction_override=reduction_override) losses['loss_bbox_reg'] = self.loss_bbox_reg( bucket_offset_preds, bucket_offset_targets, bucket_offset_weights, avg_factor=bucket_offset_targets.size(0), reduction_override=reduction_override) return losses @force_fp32(apply_to=('cls_score', 'bbox_pred')) def get_bboxes(self, rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None): if isinstance(cls_score, list): cls_score = sum(cls_score) / float(len(cls_score)) scores = F.softmax(cls_score, dim=1) if cls_score is not None else None if bbox_pred is not None: bboxes, confidences = self.bbox_coder.decode( rois[:, 1:], bbox_pred, img_shape) else: bboxes = rois[:, 1:].clone() confidences = None if img_shape is not None: bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1) bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1) if rescale and bboxes.size(0) > 0: if isinstance(scale_factor, float): bboxes /= scale_factor else: bboxes /= torch.from_numpy(scale_factor).to(bboxes.device) if cfg is None: return bboxes, scores else: det_bboxes, det_labels = multiclass_nms( bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=confidences) return det_bboxes, det_labels @force_fp32(apply_to=('bbox_preds', )) def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): """Refine bboxes during training. 
Args: rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, and bs is the sampled RoIs per image. labels (Tensor): Shape (n*bs, ). bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \ (n*bs, num_buckets*2)]. pos_is_gts (list[Tensor]): Flags indicating if each positive bbox is a gt bbox. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Refined bboxes of each image in a mini-batch. """ img_ids = rois[:, 0].long().unique(sorted=True) assert img_ids.numel() == len(img_metas) bboxes_list = [] for i in range(len(img_metas)): inds = torch.nonzero( rois[:, 0] == i, as_tuple=False).squeeze(dim=1) num_rois = inds.numel() bboxes_ = rois[inds, 1:] label_ = labels[inds] edge_cls_preds, edge_offset_preds = bbox_preds edge_cls_preds_ = edge_cls_preds[inds] edge_offset_preds_ = edge_offset_preds[inds] bbox_pred_ = [edge_cls_preds_, edge_offset_preds_] img_meta_ = img_metas[i] pos_is_gts_ = pos_is_gts[i] bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, img_meta_) # filter gt bboxes pos_keep = 1 - pos_is_gts_ keep_inds = pos_is_gts_.new_ones(num_rois) keep_inds[:len(pos_is_gts_)] = pos_keep bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) return bboxes_list @force_fp32(apply_to=('bbox_pred', )) def regress_by_class(self, rois, label, bbox_pred, img_meta): """Regress the bbox for the predicted class. Used in Cascade R-CNN. Args: rois (Tensor): shape (n, 4) or (n, 5) label (Tensor): shape (n, ) bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \ (n, num_buckets *2)] img_meta (dict): Image meta info. Returns: Tensor: Regressed bboxes, the same shape as input rois. """ assert rois.size(1) == 4 or rois.size(1) == 5 if rois.size(1) == 4: new_rois, _ = self.bbox_coder.decode(rois, bbox_pred, img_meta['img_shape']) else: bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred, img_meta['img_shape']) new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) return new_rois
25,050
41.822222
79
py
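Note: an illustrative sketch of calling the SABLHead above on pooled RoI features (assumed mmdet environment; shapes are examples only). With the default BucketingBBoxCoder (14 buckets per side pair), the regression branch returns side-aware bucket classification and offset predictions alongside the usual classification scores.

import torch
from mmdet.models.roi_heads.bbox_heads import SABLHead

head = SABLHead(num_classes=80)        # defaults: 7x7 RoI features, 14 buckets
roi_feats = torch.rand(2, 256, 7, 7)   # 2 RoIs
cls_score, (bucket_cls_pred, bucket_offset_pred) = head(roi_feats)
print(cls_score.shape)           # torch.Size([2, 81])
print(bucket_cls_pred.shape)     # torch.Size([2, 28])  4 sides * ceil(14 / 2) buckets
print(bucket_offset_pred.shape)  # torch.Size([2, 28])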
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/bbox_heads/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
                               Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .sabl_head import SABLHead
from .scnet_bbox_head import SCNetBBoxHead

__all__ = [
    'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
    'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
    'SCNetBBoxHead'
]
524
34
76
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/bbox_heads/dii_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.cnn import (bias_init_with_prob, build_activation_layer, build_norm_layer) from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention from mmcv.runner import auto_fp16, force_fp32 from mmdet.core import multi_apply from mmdet.models.builder import HEADS, build_loss from mmdet.models.dense_heads.atss_head import reduce_mean from mmdet.models.losses import accuracy from mmdet.models.utils import build_transformer from .bbox_head import BBoxHead @HEADS.register_module() class DIIHead(BBoxHead): r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_ Args: num_classes (int): Number of class in dataset. Defaults to 80. num_ffn_fcs (int): The number of fully-connected layers in FFNs. Defaults to 2. num_heads (int): The hidden dimension of FFNs. Defaults to 8. num_cls_fcs (int): The number of fully-connected layers in classification subnet. Defaults to 1. num_reg_fcs (int): The number of fully-connected layers in regression subnet. Defaults to 3. feedforward_channels (int): The hidden dimension of FFNs. Defaults to 2048 in_channels (int): Hidden_channels of MultiheadAttention. Defaults to 256. dropout (float): Probability of drop the channel. Defaults to 0.0 ffn_act_cfg (dict): The activation config for FFNs. dynamic_conv_cfg (dict): The convolution config for DynamicConv. loss_iou (dict): The config for iou or giou loss. """ def __init__(self, num_classes=80, num_ffn_fcs=2, num_heads=8, num_cls_fcs=1, num_reg_fcs=3, feedforward_channels=2048, in_channels=256, dropout=0.0, ffn_act_cfg=dict(type='ReLU', inplace=True), dynamic_conv_cfg=dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=7, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), loss_iou=dict(type='GIoULoss', loss_weight=2.0), init_cfg=None, **kwargs): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super(DIIHead, self).__init__( num_classes=num_classes, reg_decoded_bbox=True, reg_class_agnostic=True, init_cfg=init_cfg, **kwargs) self.loss_iou = build_loss(loss_iou) self.in_channels = in_channels self.fp16_enabled = False self.attention = MultiheadAttention(in_channels, num_heads, dropout) self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1] self.instance_interactive_conv = build_transformer(dynamic_conv_cfg) self.instance_interactive_conv_dropout = nn.Dropout(dropout) self.instance_interactive_conv_norm = build_norm_layer( dict(type='LN'), in_channels)[1] self.ffn = FFN( in_channels, feedforward_channels, num_ffn_fcs, act_cfg=ffn_act_cfg, dropout=dropout) self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1] self.cls_fcs = nn.ModuleList() for _ in range(num_cls_fcs): self.cls_fcs.append( nn.Linear(in_channels, in_channels, bias=False)) self.cls_fcs.append( build_norm_layer(dict(type='LN'), in_channels)[1]) self.cls_fcs.append( build_activation_layer(dict(type='ReLU', inplace=True))) # over load the self.fc_cls in BBoxHead if self.loss_cls.use_sigmoid: self.fc_cls = nn.Linear(in_channels, self.num_classes) else: self.fc_cls = nn.Linear(in_channels, self.num_classes + 1) self.reg_fcs = nn.ModuleList() for _ in range(num_reg_fcs): self.reg_fcs.append( nn.Linear(in_channels, in_channels, bias=False)) self.reg_fcs.append( build_norm_layer(dict(type='LN'), in_channels)[1]) self.reg_fcs.append( 
build_activation_layer(dict(type='ReLU', inplace=True))) # over load the self.fc_cls in BBoxHead self.fc_reg = nn.Linear(in_channels, 4) assert self.reg_class_agnostic, 'DIIHead only ' \ 'suppport `reg_class_agnostic=True` ' assert self.reg_decoded_bbox, 'DIIHead only ' \ 'suppport `reg_decoded_bbox=True`' def init_weights(self): """Use xavier initialization for all weight parameter and set classification head bias as a specific value when use focal loss.""" super(DIIHead, self).init_weights() for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) else: # adopt the default initialization for # the weight and bias of the layer norm pass if self.loss_cls.use_sigmoid: bias_init = bias_init_with_prob(0.01) nn.init.constant_(self.fc_cls.bias, bias_init) @auto_fp16() def forward(self, roi_feat, proposal_feat): """Forward function of Dynamic Instance Interactive Head. Args: roi_feat (Tensor): Roi-pooling features with shape (batch_size*num_proposals, feature_dimensions, pooling_h , pooling_w). proposal_feat (Tensor): Intermediate feature get from diihead in last stage, has shape (batch_size, num_proposals, feature_dimensions) Returns: tuple[Tensor]: Usually a tuple of classification scores and bbox prediction and a intermediate feature. - cls_scores (Tensor): Classification scores for all proposals, has shape (batch_size, num_proposals, num_classes). - bbox_preds (Tensor): Box energies / deltas for all proposals, has shape (batch_size, num_proposals, 4). - obj_feat (Tensor): Object feature before classification and regression subnet, has shape (batch_size, num_proposal, feature_dimensions). """ N, num_proposals = proposal_feat.shape[:2] # Self attention proposal_feat = proposal_feat.permute(1, 0, 2) proposal_feat = self.attention_norm(self.attention(proposal_feat)) attn_feats = proposal_feat.permute(1, 0, 2) # instance interactive proposal_feat = attn_feats.reshape(-1, self.in_channels) proposal_feat_iic = self.instance_interactive_conv( proposal_feat, roi_feat) proposal_feat = proposal_feat + self.instance_interactive_conv_dropout( proposal_feat_iic) obj_feat = self.instance_interactive_conv_norm(proposal_feat) # FFN obj_feat = self.ffn_norm(self.ffn(obj_feat)) cls_feat = obj_feat reg_feat = obj_feat for cls_layer in self.cls_fcs: cls_feat = cls_layer(cls_feat) for reg_layer in self.reg_fcs: reg_feat = reg_layer(reg_feat) cls_score = self.fc_cls(cls_feat).view( N, num_proposals, self.num_classes if self.loss_cls.use_sigmoid else self.num_classes + 1) bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4) return cls_score, bbox_delta, obj_feat.view( N, num_proposals, self.in_channels), attn_feats @force_fp32(apply_to=('cls_score', 'bbox_pred')) def loss(self, cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, imgs_whwh=None, reduction_override=None, **kwargs): """"Loss function of DIIHead, get loss of all images. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, num_classes) bbox_pred (Tensor): Regression prediction results, has shape (batch_size * num_proposals_single_image, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. 
labels (Tensor): Label of each proposals, has shape (batch_size * num_proposals_single_image label_weights (Tensor): Classification loss weight of each proposals, has shape (batch_size * num_proposals_single_image bbox_targets (Tensor): Regression targets of each proposals, has shape (batch_size * num_proposals_single_image, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. bbox_weights (Tensor): Regression loss weight of each proposals's coordinate, has shape (batch_size * num_proposals_single_image, 4), imgs_whwh (Tensor): imgs_whwh (Tensor): Tensor with\ shape (batch_size, num_proposals, 4), the last dimension means [img_width,img_height, img_width, img_height]. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Defaults to None, Returns: dict[str, Tensor]: Dictionary of loss components """ losses = dict() bg_class_ind = self.num_classes # note in spare rcnn num_gt == num_pos pos_inds = (labels >= 0) & (labels < bg_class_ind) num_pos = pos_inds.sum().float() avg_factor = reduce_mean(num_pos) if cls_score is not None: if cls_score.numel() > 0: losses['loss_cls'] = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) losses['pos_acc'] = accuracy(cls_score[pos_inds], labels[pos_inds]) if bbox_pred is not None: # 0~self.num_classes-1 are FG, self.num_classes is BG # do not perform bounding box regression for BG anymore. if pos_inds.any(): pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] losses['loss_bbox'] = self.loss_bbox( pos_bbox_pred / imgs_whwh, bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh, bbox_weights[pos_inds.type(torch.bool)], avg_factor=avg_factor) losses['loss_iou'] = self.loss_iou( pos_bbox_pred, bbox_targets[pos_inds.type(torch.bool)], bbox_weights[pos_inds.type(torch.bool)], avg_factor=avg_factor) else: losses['loss_bbox'] = bbox_pred.sum() * 0 losses['loss_iou'] = bbox_pred.sum() * 0 return losses def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes, pos_gt_bboxes, pos_gt_labels, cfg): """Calculate the ground truth for proposals in the single image according to the sampling results. Almost the same as the implementation in `bbox_head`, we add pos_inds and neg_inds to select positive and negative samples instead of selecting the first num_pos as positive samples. Args: pos_inds (Tensor): The length is equal to the positive sample numbers contain all index of the positive sample in the origin proposal set. neg_inds (Tensor): The length is equal to the negative sample numbers contain all index of the negative sample in the origin proposal set. pos_bboxes (Tensor): Contains all the positive boxes, has shape (num_pos, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. neg_bboxes (Tensor): Contains all the negative boxes, has shape (num_neg, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. pos_gt_bboxes (Tensor): Contains gt_boxes for all positive samples, has shape (num_pos, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. pos_gt_labels (Tensor): Contains gt_labels for all positive samples, has shape (num_pos, ). cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following Tensors: - labels(Tensor): Gt_labels for all proposals, has shape (num_proposals,). 
- label_weights(Tensor): Labels_weights for all proposals, has shape (num_proposals,). - bbox_targets(Tensor):Regression target for all proposals, has shape (num_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights(Tensor):Regression weights for all proposals, has shape (num_proposals, 4). """ num_pos = pos_bboxes.size(0) num_neg = neg_bboxes.size(0) num_samples = num_pos + num_neg # original implementation uses new_zeros since BG are set to be 0 # now use empty & fill because BG cat_id = num_classes, # FG cat_id = [0, num_classes-1] labels = pos_bboxes.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_bboxes.new_zeros(num_samples) bbox_targets = pos_bboxes.new_zeros(num_samples, 4) bbox_weights = pos_bboxes.new_zeros(num_samples, 4) if num_pos > 0: labels[pos_inds] = pos_gt_labels pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight label_weights[pos_inds] = pos_weight if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( pos_bboxes, pos_gt_bboxes) else: pos_bbox_targets = pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1 if num_neg > 0: label_weights[neg_inds] = 1.0 return labels, label_weights, bbox_targets, bbox_weights def get_targets(self, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, concat=True): """Calculate the ground truth for all samples in a batch according to the sampling_results. Almost the same as the implementation in bbox_head, we passed additional parameters pos_inds_list and neg_inds_list to `_get_target_single` function. Args: sampling_results (List[obj:SamplingResults]): Assign results of all images in a batch after sampling. gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch, each tensor has shape (num_gt, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. gt_labels (list[Tensor]): Gt_labels of all images in a batch, each tensor has shape (num_gt,). rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN. concat (bool): Whether to concatenate the results of all the images in a single batch. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following list of Tensors: - labels (list[Tensor],Tensor): Gt_labels for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - label_weights (list[Tensor]): Labels_weights for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - bbox_targets (list[Tensor],Tensor): Regression target for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights (list[tensor],Tensor): Regression weights for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4). 
""" pos_inds_list = [res.pos_inds for res in sampling_results] neg_inds_list = [res.neg_inds for res in sampling_results] pos_bboxes_list = [res.pos_bboxes for res in sampling_results] neg_bboxes_list = [res.neg_bboxes for res in sampling_results] pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] labels, label_weights, bbox_targets, bbox_weights = multi_apply( self._get_target_single, pos_inds_list, neg_inds_list, pos_bboxes_list, neg_bboxes_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=rcnn_train_cfg) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights
19,199
43.964871
79
py
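Note: a rough usage sketch for the DIIHead above, as used per stage in Sparse R-CNN (assumed mmdet environment; batch and proposal counts are illustrative). The head takes pooled RoI features together with the proposal features produced by the previous stage, as described in its forward docstring.

import torch
from mmdet.models.roi_heads.bbox_heads import DIIHead

head = DIIHead(num_classes=80, in_channels=256)
batch_size, num_proposals = 2, 4
roi_feat = torch.rand(batch_size * num_proposals, 256, 7, 7)  # pooled RoI features
proposal_feat = torch.rand(batch_size, num_proposals, 256)    # per-proposal features from the previous stage
cls_score, bbox_delta, obj_feat, attn_feats = head(roi_feat, proposal_feat)
print(cls_score.shape)   # torch.Size([2, 4, 81]) with the default (softmax) classification loss
print(bbox_delta.shape)  # torch.Size([2, 4, 4])
print(obj_feat.shape)    # torch.Size([2, 4, 256])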
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmdet.models.builder import HEADS from mmdet.models.utils import build_linear_layer from .bbox_head import BBoxHead @HEADS.register_module() class ConvFCBBoxHead(BBoxHead): r"""More general bbox head, with shared conv and fc layers and two optional separated branches. .. code-block:: none /-> cls convs -> cls fcs -> cls shared convs -> shared fcs \-> reg convs -> reg fcs -> reg """ # noqa: W605 def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, conv_out_channels=256, fc_out_channels=1024, conv_cfg=None, norm_cfg=None, init_cfg=None, *args, **kwargs): super(ConvFCBBoxHead, self).__init__( *args, init_cfg=init_cfg, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) if num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs == 0 and num_cls_fcs == 0 if not self.with_reg: assert num_reg_convs == 0 and num_reg_fcs == 0 self.num_shared_convs = num_shared_convs self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg # add shared convs and fcs self.shared_convs, self.shared_fcs, last_layer_dim = \ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim # add cls specific branch self.cls_convs, self.cls_fcs, self.cls_last_dim = \ self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) # add reg specific branch self.reg_convs, self.reg_fcs, self.reg_last_dim = \ self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area self.relu = nn.ReLU(inplace=True) # reconstruct fc_cls and fc_reg since input channels are changed if self.with_cls: if self.custom_cls_channels: cls_channels = self.loss_cls.get_cls_channels(self.num_classes) else: cls_channels = self.num_classes + 1 self.fc_cls = build_linear_layer( self.cls_predictor_cfg, in_features=self.cls_last_dim, out_features=cls_channels) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) self.fc_reg = build_linear_layer( self.reg_predictor_cfg, in_features=self.reg_last_dim, out_features=out_dim_reg) if init_cfg is None: # when init_cfg is None, # It has been set to # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))], # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))] # after `super(ConvFCBBoxHead, self).__init__()` # we only need to append additional configuration # for `shared_fcs`, `cls_fcs` and `reg_fcs` self.init_cfg += [ dict( type='Xavier', distribution='uniform', override=[ dict(name='shared_fcs'), dict(name='cls_fcs'), dict(name='reg_fcs') ]) ] def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False): """Add shared or separable branch. 
convs -> avg pool (optional) -> fcs """ last_layer_dim = in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared branch, only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def forward(self, x): # shared part if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) # separate branches x_cls = x x_reg = x for conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred = self.fc_reg(x_reg) if self.with_reg else None return cls_score, bbox_pred @HEADS.register_module() class Shared2FCBBoxHead(ConvFCBBoxHead): def __init__(self, fc_out_channels=1024, *args, **kwargs): super(Shared2FCBBoxHead, self).__init__( num_shared_convs=0, num_shared_fcs=2, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels, *args, **kwargs) @HEADS.register_module() class Shared4Conv1FCBBoxHead(ConvFCBBoxHead): def __init__(self, fc_out_channels=1024, *args, **kwargs): super(Shared4Conv1FCBBoxHead, self).__init__( num_shared_convs=4, num_shared_fcs=1, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels, *args, **kwargs)
8,364
35.369565
79
py
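Note: a small sketch of the Shared2FCBBoxHead defined in convfc_bbox_head.py above, the standard box head of two-stage detectors such as Faster R-CNN (assumed mmdet environment; shapes are illustrative).

import torch
from mmdet.models.roi_heads.bbox_heads import Shared2FCBBoxHead

head = Shared2FCBBoxHead(
    in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80)
roi_feats = torch.rand(8, 256, 7, 7)   # 8 RoIs from RoIAlign
cls_score, bbox_pred = head(roi_feats)
print(cls_score.shape)  # torch.Size([8, 81])
print(bbox_pred.shape)  # torch.Size([8, 320])  class-specific regression by default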
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, ModuleList from mmdet.models.backbones.resnet import Bottleneck from mmdet.models.builder import HEADS from .bbox_head import BBoxHead class BasicResBlock(BaseModule): """Basic residual block. This block is a little different from the block in the ResNet backbone. The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock. Args: in_channels (int): Channels of the input feature map. out_channels (int): Channels of the output feature map. conv_cfg (dict): The config dict for convolution layers. norm_cfg (dict): The config dict for normalization layers. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN'), init_cfg=None): super(BasicResBlock, self).__init__(init_cfg) # main path self.conv1 = ConvModule( in_channels, in_channels, kernel_size=3, padding=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg) self.conv2 = ConvModule( in_channels, out_channels, kernel_size=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) # identity path self.conv_identity = ConvModule( in_channels, out_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) self.relu = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) identity = self.conv_identity(identity) out = x + identity out = self.relu(out) return out @HEADS.register_module() class DoubleConvFCBBoxHead(BBoxHead): r"""Bbox head used in Double-Head R-CNN .. code-block:: none /-> cls /-> shared convs -> \-> reg roi features /-> cls \-> shared fc -> \-> reg """ # noqa: W605 def __init__(self, num_convs=0, num_fcs=0, conv_out_channels=1024, fc_out_channels=1024, conv_cfg=None, norm_cfg=dict(type='BN'), init_cfg=dict( type='Normal', override=[ dict(type='Normal', name='fc_cls', std=0.01), dict(type='Normal', name='fc_reg', std=0.001), dict( type='Xavier', name='fc_branch', distribution='uniform') ]), **kwargs): kwargs.setdefault('with_avg_pool', True) super(DoubleConvFCBBoxHead, self).__init__(init_cfg=init_cfg, **kwargs) assert self.with_avg_pool assert num_convs > 0 assert num_fcs > 0 self.num_convs = num_convs self.num_fcs = num_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg # increase the channel of input features self.res_block = BasicResBlock(self.in_channels, self.conv_out_channels) # add conv heads self.conv_branch = self._add_conv_branch() # add fc heads self.fc_branch = self._add_fc_branch() out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg) self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1) self.relu = nn.ReLU(inplace=True) def _add_conv_branch(self): """Add the fc branch which consists of a sequential of conv layers.""" branch_convs = ModuleList() for i in range(self.num_convs): branch_convs.append( Bottleneck( inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) return branch_convs def _add_fc_branch(self): """Add the fc branch which consists of a sequential of fc layers.""" branch_fcs = ModuleList() for i in range(self.num_fcs): fc_in_channels = ( self.in_channels * self.roi_feat_area if i == 0 else self.fc_out_channels) 
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels)) return branch_fcs def forward(self, x_cls, x_reg): # conv head x_conv = self.res_block(x_reg) for conv in self.conv_branch: x_conv = conv(x_conv) if self.with_avg_pool: x_conv = self.avg_pool(x_conv) x_conv = x_conv.view(x_conv.size(0), -1) bbox_pred = self.fc_reg(x_conv) # fc head x_fc = x_cls.view(x_cls.size(0), -1) for fc in self.fc_branch: x_fc = self.relu(fc(x_fc)) cls_score = self.fc_cls(x_fc) return cls_score, bbox_pred
5,733
31.03352
79
py
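Note: an illustrative sketch of the DoubleConvFCBBoxHead above; unlike the other heads it takes two feature inputs, one routed to the fc (classification) branch and one to the conv (regression) branch. The hyper-parameters follow the Double-Head R-CNN defaults but are only an example here (assumed mmdet environment).

import torch
from mmdet.models.roi_heads.bbox_heads import DoubleConvFCBBoxHead

head = DoubleConvFCBBoxHead(
    num_convs=4, num_fcs=2, in_channels=256,
    conv_out_channels=1024, fc_out_channels=1024,
    roi_feat_size=7, num_classes=80)
x_cls = torch.rand(4, 256, 7, 7)  # features for the fc (classification) branch
x_reg = torch.rand(4, 256, 7, 7)  # features for the conv (regression) branch
cls_score, bbox_pred = head(x_cls, x_reg)
print(cls_score.shape)  # torch.Size([4, 81])
print(bbox_pred.shape)  # torch.Size([4, 320])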
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/shared_heads/res_layer.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from mmcv.runner import BaseModule, auto_fp16 from mmdet.models.backbones import ResNet from mmdet.models.builder import SHARED_HEADS from mmdet.models.utils import ResLayer as _ResLayer @SHARED_HEADS.register_module() class ResLayer(BaseModule): def __init__(self, depth, stage=3, stride=2, dilation=1, style='pytorch', norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, with_cp=False, dcn=None, pretrained=None, init_cfg=None): super(ResLayer, self).__init__(init_cfg) self.norm_eval = norm_eval self.norm_cfg = norm_cfg self.stage = stage self.fp16_enabled = False block, stage_blocks = ResNet.arch_settings[depth] stage_block = stage_blocks[stage] planes = 64 * 2**stage inplanes = 64 * 2**(stage - 1) * block.expansion res_layer = _ResLayer( block, inplanes, planes, stage_block, stride=stride, dilation=dilation, style=style, with_cp=with_cp, norm_cfg=self.norm_cfg, dcn=dcn) self.add_module(f'layer{stage + 1}', res_layer) assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is a deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] else: raise TypeError('pretrained must be a str or None') @auto_fp16() def forward(self, x): res_layer = getattr(self, f'layer{self.stage + 1}') out = res_layer(x) return out def train(self, mode=True): super(ResLayer, self).train(mode) if self.norm_eval: for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.eval()
2,587
30.950617
76
py
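A rough usage sketch for the shared ResLayer head above (editorial addition, not part of the record): with depth=50 and stage=3 it wraps ResNet-50's layer4, so 1024-channel RoI features come out with 2048 channels at half the spatial size. mmdet and torch are assumed; sizes are illustrative.

import torch
from mmdet.models.roi_heads.shared_heads import ResLayer

# Shared head equivalent to ResNet-50 layer4 (a stride-2 Bottleneck stage).
shared_head = ResLayer(depth=50, stage=3, stride=2)
shared_head.eval()  # optional; norm_eval=True keeps BN frozen even in train mode

roi_feats = torch.rand(4, 1024, 14, 14)   # (num_rois, C, H, W) from RoIAlign
out = shared_head(roi_feats)              # expected shape: (4, 2048, 7, 7)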
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/shared_heads/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .res_layer import ResLayer __all__ = ['ResLayer']
104
20
47
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/mask_heads/grid_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from mmdet.models.builder import HEADS, build_loss @HEADS.register_module() class GridHead(BaseModule): def __init__(self, grid_points=9, num_convs=8, roi_feat_size=14, in_channels=256, conv_kernel_size=3, point_feat_channels=64, deconv_kernel_size=4, class_agnostic=False, loss_grid=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15), conv_cfg=None, norm_cfg=dict(type='GN', num_groups=36), init_cfg=[ dict(type='Kaiming', layer=['Conv2d', 'Linear']), dict( type='Normal', layer='ConvTranspose2d', std=0.001, override=dict( type='Normal', name='deconv2', std=0.001, bias=-np.log(0.99 / 0.01))) ]): super(GridHead, self).__init__(init_cfg) self.grid_points = grid_points self.num_convs = num_convs self.roi_feat_size = roi_feat_size self.in_channels = in_channels self.conv_kernel_size = conv_kernel_size self.point_feat_channels = point_feat_channels self.conv_out_channels = self.point_feat_channels * self.grid_points self.class_agnostic = class_agnostic self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN': assert self.conv_out_channels % norm_cfg['num_groups'] == 0 assert self.grid_points >= 4 self.grid_size = int(np.sqrt(self.grid_points)) if self.grid_size * self.grid_size != self.grid_points: raise ValueError('grid_points must be a square number') # the predicted heatmap is half of whole_map_size if not isinstance(self.roi_feat_size, int): raise ValueError('Only square RoIs are supporeted in Grid R-CNN') self.whole_map_size = self.roi_feat_size * 4 # compute point-wise sub-regions self.sub_regions = self.calc_sub_regions() self.convs = [] for i in range(self.num_convs): in_channels = ( self.in_channels if i == 0 else self.conv_out_channels) stride = 2 if i == 0 else 1 padding = (self.conv_kernel_size - 1) // 2 self.convs.append( ConvModule( in_channels, self.conv_out_channels, self.conv_kernel_size, stride=stride, padding=padding, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=True)) self.convs = nn.Sequential(*self.convs) self.deconv1 = nn.ConvTranspose2d( self.conv_out_channels, self.conv_out_channels, kernel_size=deconv_kernel_size, stride=2, padding=(deconv_kernel_size - 2) // 2, groups=grid_points) self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels) self.deconv2 = nn.ConvTranspose2d( self.conv_out_channels, grid_points, kernel_size=deconv_kernel_size, stride=2, padding=(deconv_kernel_size - 2) // 2, groups=grid_points) # find the 4-neighbor of each grid point self.neighbor_points = [] grid_size = self.grid_size for i in range(grid_size): # i-th column for j in range(grid_size): # j-th row neighbors = [] if i > 0: # left: (i - 1, j) neighbors.append((i - 1) * grid_size + j) if j > 0: # up: (i, j - 1) neighbors.append(i * grid_size + j - 1) if j < grid_size - 1: # down: (i, j + 1) neighbors.append(i * grid_size + j + 1) if i < grid_size - 1: # right: (i + 1, j) neighbors.append((i + 1) * grid_size + j) self.neighbor_points.append(tuple(neighbors)) # total edges in the grid self.num_edges = sum([len(p) for p in self.neighbor_points]) self.forder_trans = nn.ModuleList() # first-order feature transition self.sorder_trans = nn.ModuleList() # second-order feature transition for neighbors in self.neighbor_points: fo_trans = nn.ModuleList() so_trans = nn.ModuleList() for _ in range(len(neighbors)): # each transition 
module consists of a 5x5 depth-wise conv and # 1x1 conv. fo_trans.append( nn.Sequential( nn.Conv2d( self.point_feat_channels, self.point_feat_channels, 5, stride=1, padding=2, groups=self.point_feat_channels), nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 1))) so_trans.append( nn.Sequential( nn.Conv2d( self.point_feat_channels, self.point_feat_channels, 5, 1, 2, groups=self.point_feat_channels), nn.Conv2d(self.point_feat_channels, self.point_feat_channels, 1))) self.forder_trans.append(fo_trans) self.sorder_trans.append(so_trans) self.loss_grid = build_loss(loss_grid) def forward(self, x): assert x.shape[-1] == x.shape[-2] == self.roi_feat_size # RoI feature transformation, downsample 2x x = self.convs(x) c = self.point_feat_channels # first-order fusion x_fo = [None for _ in range(self.grid_points)] for i, points in enumerate(self.neighbor_points): x_fo[i] = x[:, i * c:(i + 1) * c] for j, point_idx in enumerate(points): x_fo[i] = x_fo[i] + self.forder_trans[i][j]( x[:, point_idx * c:(point_idx + 1) * c]) # second-order fusion x_so = [None for _ in range(self.grid_points)] for i, points in enumerate(self.neighbor_points): x_so[i] = x[:, i * c:(i + 1) * c] for j, point_idx in enumerate(points): x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx]) # predicted heatmap with fused features x2 = torch.cat(x_so, dim=1) x2 = self.deconv1(x2) x2 = F.relu(self.norm1(x2), inplace=True) heatmap = self.deconv2(x2) # predicted heatmap with original features (applicable during training) if self.training: x1 = x x1 = self.deconv1(x1) x1 = F.relu(self.norm1(x1), inplace=True) heatmap_unfused = self.deconv2(x1) else: heatmap_unfused = heatmap return dict(fused=heatmap, unfused=heatmap_unfused) def calc_sub_regions(self): """Compute point specific representation regions. See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details. """ # to make it consistent with the original implementation, half_size # is computed as 2 * quarter_size, which is smaller half_size = self.whole_map_size // 4 * 2 sub_regions = [] for i in range(self.grid_points): x_idx = i // self.grid_size y_idx = i % self.grid_size if x_idx == 0: sub_x1 = 0 elif x_idx == self.grid_size - 1: sub_x1 = half_size else: ratio = x_idx / (self.grid_size - 1) - 0.25 sub_x1 = max(int(ratio * self.whole_map_size), 0) if y_idx == 0: sub_y1 = 0 elif y_idx == self.grid_size - 1: sub_y1 = half_size else: ratio = y_idx / (self.grid_size - 1) - 0.25 sub_y1 = max(int(ratio * self.whole_map_size), 0) sub_regions.append( (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size)) return sub_regions def get_targets(self, sampling_results, rcnn_train_cfg): # mix all samples (across images) together. 
pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results], dim=0).cpu() pos_gt_bboxes = torch.cat( [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu() assert pos_bboxes.shape == pos_gt_bboxes.shape # expand pos_bboxes to 2x of original size x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1) pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1) num_rois = pos_bboxes.shape[0] map_size = self.whole_map_size # this is not the final target shape targets = torch.zeros((num_rois, self.grid_points, map_size, map_size), dtype=torch.float) # pre-compute interpolation factors for all grid points. # the first item is the factor of x-dim, and the second is y-dim. # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1) factors = [] for j in range(self.grid_points): x_idx = j // self.grid_size y_idx = j % self.grid_size factors.append((1 - x_idx / (self.grid_size - 1), 1 - y_idx / (self.grid_size - 1))) radius = rcnn_train_cfg.pos_radius radius2 = radius**2 for i in range(num_rois): # ignore small bboxes if (pos_bbox_ws[i] <= self.grid_size or pos_bbox_hs[i] <= self.grid_size): continue # for each grid point, mark a small circle as positive for j in range(self.grid_points): factor_x, factor_y = factors[j] gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + ( 1 - factor_x) * pos_gt_bboxes[i, 2] gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + ( 1 - factor_y) * pos_gt_bboxes[i, 3] cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] * map_size) cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] * map_size) for x in range(cx - radius, cx + radius + 1): for y in range(cy - radius, cy + radius + 1): if x >= 0 and x < map_size and y >= 0 and y < map_size: if (x - cx)**2 + (y - cy)**2 <= radius2: targets[i, j, y, x] = 1 # reduce the target heatmap size by a half # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688). 
sub_targets = [] for i in range(self.grid_points): sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i] sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2]) sub_targets = torch.cat(sub_targets, dim=1) sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device) return sub_targets def loss(self, grid_pred, grid_targets): loss_fused = self.loss_grid(grid_pred['fused'], grid_targets) loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets) loss_grid = loss_fused + loss_unfused return dict(loss_grid=loss_grid) def get_bboxes(self, det_bboxes, grid_pred, img_metas): # TODO: refactoring assert det_bboxes.shape[0] == grid_pred.shape[0] det_bboxes = det_bboxes.cpu() cls_scores = det_bboxes[:, [4]] det_bboxes = det_bboxes[:, :4] grid_pred = grid_pred.sigmoid().cpu() R, c, h, w = grid_pred.shape half_size = self.whole_map_size // 4 * 2 assert h == w == half_size assert c == self.grid_points # find the point with max scores in the half-sized heatmap grid_pred = grid_pred.view(R * c, h * w) pred_scores, pred_position = grid_pred.max(dim=1) xs = pred_position % w ys = pred_position // w # get the position in the whole heatmap instead of half-sized heatmap for i in range(self.grid_points): xs[i::self.grid_points] += self.sub_regions[i][0] ys[i::self.grid_points] += self.sub_regions[i][1] # reshape to (num_rois, grid_points) pred_scores, xs, ys = tuple( map(lambda x: x.view(R, c), [pred_scores, xs, ys])) # get expanded pos_bboxes widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1) heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1) x1 = (det_bboxes[:, 0, None] - widths / 2) y1 = (det_bboxes[:, 1, None] - heights / 2) # map the grid point to the absolute coordinates abs_xs = (xs.float() + 0.5) / w * widths + x1 abs_ys = (ys.float() + 0.5) / h * heights + y1 # get the grid points indices that fall on the bbox boundaries x1_inds = [i for i in range(self.grid_size)] y1_inds = [i * self.grid_size for i in range(self.grid_size)] x2_inds = [ self.grid_points - self.grid_size + i for i in range(self.grid_size) ] y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)] # voting of all grid points on some boundary bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, x1_inds].sum(dim=1, keepdim=True)) bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, y1_inds].sum(dim=1, keepdim=True)) bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, x2_inds].sum(dim=1, keepdim=True)) bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum( dim=1, keepdim=True) / ( pred_scores[:, y2_inds].sum(dim=1, keepdim=True)) bbox_res = torch.cat( [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1) bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1]) bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0]) return bbox_res
15,579
41.802198
79
py
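To make the grid geometry in the file above concrete, here is a small self-contained check (plain Python, no mmdet required) of the interpolation factors and the 4-neighbourhood for the default 9-point grid; the values mirror what get_targets() and the neighbor_points construction compute.

grid_points = 9
grid_size = 3  # sqrt(grid_points)

# Interpolation factors from get_targets(): (1 - x/(n-1), 1 - y/(n-1)) per point.
factors = [(1 - (j // grid_size) / (grid_size - 1),
            1 - (j % grid_size) / (grid_size - 1)) for j in range(grid_points)]
assert factors[0] == (1.0, 1.0)   # top-left grid point sits at (x1, y1)
assert factors[4] == (0.5, 0.5)   # centre point interpolates both corners equally
assert factors[8] == (0.0, 0.0)   # bottom-right grid point sits at (x2, y2)

# 4-neighbourhood of the centre point (i = j = 1, flat index 4), matching the
# neighbor_points construction: left, up, down, right.
i = j = 1
neighbors = [(i - 1) * grid_size + j, i * grid_size + j - 1,
             i * grid_size + j + 1, (i + 1) * grid_size + j]
assert neighbors == [1, 3, 5, 7]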
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmcv.runner import auto_fp16, force_fp32 from mmdet.core import mask_target from mmdet.models.builder import HEADS from mmdet.models.dense_heads.atss_head import reduce_mean from mmdet.models.utils import build_transformer from .fcn_mask_head import FCNMaskHead @HEADS.register_module() class DynamicMaskHead(FCNMaskHead): r"""Dynamic Mask Head for `Instances as Queries <http://arxiv.org/abs/2105.01928>`_ Args: num_convs (int): Number of convolution layer. Defaults to 4. roi_feat_size (int): The output size of RoI extractor, Defaults to 14. in_channels (int): Input feature channels. Defaults to 256. conv_kernel_size (int): Kernel size of convolution layers. Defaults to 3. conv_out_channels (int): Output channels of convolution layers. Defaults to 256. num_classes (int): Number of classes. Defaults to 80 class_agnostic (int): Whether generate class agnostic prediction. Defaults to False. dropout (float): Probability of drop the channel. Defaults to 0.0 upsample_cfg (dict): The config for upsample layer. conv_cfg (dict): The convolution layer config. norm_cfg (dict): The norm layer config. dynamic_conv_cfg (dict): The dynamic convolution layer config. loss_mask (dict): The config for mask loss. """ def __init__(self, num_convs=4, roi_feat_size=14, in_channels=256, conv_kernel_size=3, conv_out_channels=256, num_classes=80, class_agnostic=False, upsample_cfg=dict(type='deconv', scale_factor=2), conv_cfg=None, norm_cfg=None, dynamic_conv_cfg=dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=14, with_proj=False, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), loss_mask=dict(type='DiceLoss', loss_weight=8.0), **kwargs): super(DynamicMaskHead, self).__init__( num_convs=num_convs, roi_feat_size=roi_feat_size, in_channels=in_channels, conv_kernel_size=conv_kernel_size, conv_out_channels=conv_out_channels, num_classes=num_classes, class_agnostic=class_agnostic, upsample_cfg=upsample_cfg, conv_cfg=conv_cfg, norm_cfg=norm_cfg, loss_mask=loss_mask, **kwargs) assert class_agnostic is False, \ 'DynamicMaskHead only support class_agnostic=False' self.fp16_enabled = False self.instance_interactive_conv = build_transformer(dynamic_conv_cfg) def init_weights(self): """Use xavier initialization for all weight parameter and set classification head bias as a specific value when use focal loss.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) nn.init.constant_(self.conv_logits.bias, 0.) @auto_fp16() def forward(self, roi_feat, proposal_feat): """Forward function of DynamicMaskHead. Args: roi_feat (Tensor): Roi-pooling features with shape (batch_size*num_proposals, feature_dimensions, pooling_h , pooling_w). proposal_feat (Tensor): Intermediate feature get from diihead in last stage, has shape (batch_size*num_proposals, feature_dimensions) Returns: mask_pred (Tensor): Predicted foreground masks with shape (batch_size*num_proposals, num_classes, pooling_h*2, pooling_w*2). 
""" proposal_feat = proposal_feat.reshape(-1, self.in_channels) proposal_feat_iic = self.instance_interactive_conv( proposal_feat, roi_feat) x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size()) for conv in self.convs: x = conv(x) if self.upsample is not None: x = self.upsample(x) if self.upsample_method == 'deconv': x = self.relu(x) mask_pred = self.conv_logits(x) return mask_pred @force_fp32(apply_to=('mask_pred', )) def loss(self, mask_pred, mask_targets, labels): num_pos = labels.new_ones(labels.size()).float().sum() avg_factor = torch.clamp(reduce_mean(num_pos), min=1.).item() loss = dict() if mask_pred.size(0) == 0: loss_mask = mask_pred.sum() else: loss_mask = self.loss_mask( mask_pred[torch.arange(num_pos).long(), labels, ...].sigmoid(), mask_targets, avg_factor=avg_factor) loss['loss_mask'] = loss_mask return loss def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): pos_proposals = [res.pos_bboxes for res in sampling_results] pos_assigned_gt_inds = [ res.pos_assigned_gt_inds for res in sampling_results ] mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, gt_masks, rcnn_train_cfg) return mask_targets
5,665
37.283784
79
py
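A shape-level sketch for the dynamic mask head above (editorial addition). It assumes mmdet is installed so the DynamicConv module named in dynamic_conv_cfg can be built; the sizes simply follow the defaults (roi_feat_size=14, in_channels=256, num_classes=80, one 2x deconv upsampling).

import torch
from mmdet.models.roi_heads.mask_heads import DynamicMaskHead

mask_head = DynamicMaskHead()           # all defaults as in the file above

roi_feat = torch.rand(6, 256, 14, 14)   # batch_size * num_proposals RoI features
proposal_feat = torch.rand(6, 256)      # per-proposal query features
mask_pred = mask_head(roi_feat, proposal_feat)
# Expected shape: (6, 80, 28, 28) -- one 2x-upsampled mask per class and proposal.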
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn import ConvModule, Linear from mmcv.runner import ModuleList, auto_fp16 from mmdet.models.builder import HEADS from .fcn_mask_head import FCNMaskHead @HEADS.register_module() class CoarseMaskHead(FCNMaskHead): """Coarse mask head used in PointRend. Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample the input feature map instead of upsample it. Args: num_convs (int): Number of conv layers in the head. Default: 0. num_fcs (int): Number of fc layers in the head. Default: 2. fc_out_channels (int): Number of output channels of fc layer. Default: 1024. downsample_factor (int): The factor that feature map is downsampled by. Default: 2. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, num_convs=0, num_fcs=2, fc_out_channels=1024, downsample_factor=2, init_cfg=dict( type='Xavier', override=[ dict(name='fcs'), dict(type='Constant', val=0.001, name='fc_logits') ]), *arg, **kwarg): super(CoarseMaskHead, self).__init__( *arg, num_convs=num_convs, upsample_cfg=dict(type=None), init_cfg=None, **kwarg) self.init_cfg = init_cfg self.num_fcs = num_fcs assert self.num_fcs > 0 self.fc_out_channels = fc_out_channels self.downsample_factor = downsample_factor assert self.downsample_factor >= 1 # remove conv_logit delattr(self, 'conv_logits') if downsample_factor > 1: downsample_in_channels = ( self.conv_out_channels if self.num_convs > 0 else self.in_channels) self.downsample_conv = ConvModule( downsample_in_channels, self.conv_out_channels, kernel_size=downsample_factor, stride=downsample_factor, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) else: self.downsample_conv = None self.output_size = (self.roi_feat_size[0] // downsample_factor, self.roi_feat_size[1] // downsample_factor) self.output_area = self.output_size[0] * self.output_size[1] last_layer_dim = self.conv_out_channels * self.output_area self.fcs = ModuleList() for i in range(num_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) self.fcs.append(Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels output_channels = self.num_classes * self.output_area self.fc_logits = Linear(last_layer_dim, output_channels) def init_weights(self): super(FCNMaskHead, self).init_weights() @auto_fp16() def forward(self, x): for conv in self.convs: x = conv(x) if self.downsample_conv is not None: x = self.downsample_conv(x) x = x.flatten(1) for fc in self.fcs: x = self.relu(fc(x)) mask_pred = self.fc_logits(x).view( x.size(0), self.num_classes, *self.output_size) return mask_pred
3,551
34.168317
79
py
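A hedged sketch for the coarse head above, whose one non-obvious property is that it predicts masks at a downsampled resolution through fc layers rather than upsampling. mmdet and torch are assumed; sizes follow the defaults (roi_feat_size=14, downsample_factor=2, num_classes=80).

import torch
from mmdet.models.roi_heads.mask_heads import CoarseMaskHead

coarse_head = CoarseMaskHead()   # defaults: no conv layers, two fc layers

x = torch.rand(6, 256, 14, 14)   # RoI features
mask_pred = coarse_head(x)
# Expected shape: (6, 80, 7, 7) -- the 14x14 input is downsampled by a factor of 2.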
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/mask_heads/maskiou_head.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch import torch.nn as nn from mmcv.cnn import Conv2d, Linear, MaxPool2d from mmcv.runner import BaseModule, force_fp32 from torch.nn.modules.utils import _pair from mmdet.models.builder import HEADS, build_loss @HEADS.register_module() class MaskIoUHead(BaseModule): """Mask IoU Head. This head predicts the IoU of predicted masks and corresponding gt masks. """ def __init__(self, num_convs=4, num_fcs=2, roi_feat_size=14, in_channels=256, conv_out_channels=256, fc_out_channels=1024, num_classes=80, loss_iou=dict(type='MSELoss', loss_weight=0.5), init_cfg=[ dict(type='Kaiming', override=dict(name='convs')), dict(type='Caffe2Xavier', override=dict(name='fcs')), dict( type='Normal', std=0.01, override=dict(name='fc_mask_iou')) ]): super(MaskIoUHead, self).__init__(init_cfg) self.in_channels = in_channels self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.num_classes = num_classes self.fp16_enabled = False self.convs = nn.ModuleList() for i in range(num_convs): if i == 0: # concatenation of mask feature and mask prediction in_channels = self.in_channels + 1 else: in_channels = self.conv_out_channels stride = 2 if i == num_convs - 1 else 1 self.convs.append( Conv2d( in_channels, self.conv_out_channels, 3, stride=stride, padding=1)) roi_feat_size = _pair(roi_feat_size) pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2) self.fcs = nn.ModuleList() for i in range(num_fcs): in_channels = ( self.conv_out_channels * pooled_area if i == 0 else self.fc_out_channels) self.fcs.append(Linear(in_channels, self.fc_out_channels)) self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes) self.relu = nn.ReLU() self.max_pool = MaxPool2d(2, 2) self.loss_iou = build_loss(loss_iou) def forward(self, mask_feat, mask_pred): mask_pred = mask_pred.sigmoid() mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1)) x = torch.cat((mask_feat, mask_pred_pooled), 1) for conv in self.convs: x = self.relu(conv(x)) x = x.flatten(1) for fc in self.fcs: x = self.relu(fc(x)) mask_iou = self.fc_mask_iou(x) return mask_iou @force_fp32(apply_to=('mask_iou_pred', )) def loss(self, mask_iou_pred, mask_iou_targets): pos_inds = mask_iou_targets > 0 if pos_inds.sum() > 0: loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds], mask_iou_targets[pos_inds]) else: loss_mask_iou = mask_iou_pred.sum() * 0 return dict(loss_mask_iou=loss_mask_iou) @force_fp32(apply_to=('mask_pred', )) def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets, rcnn_train_cfg): """Compute target of mask IoU. Mask IoU target is the IoU of the predicted mask (inside a bbox) and the gt mask of corresponding gt mask (the whole instance). The intersection area is computed inside the bbox, and the gt mask area is computed with two steps, firstly we compute the gt area inside the bbox, then divide it by the area ratio of gt area inside the bbox and the gt area of the whole instance. Args: sampling_results (list[:obj:`SamplingResult`]): sampling results. gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance) of each image, with the same shape of the input image. mask_pred (Tensor): Predicted masks of each positive proposal, shape (num_pos, h, w). mask_targets (Tensor): Gt mask of each positive proposal, binary map of the shape (num_pos, h, w). rcnn_train_cfg (dict): Training config for R-CNN part. Returns: Tensor: mask iou target (length == num positive). 
""" pos_proposals = [res.pos_bboxes for res in sampling_results] pos_assigned_gt_inds = [ res.pos_assigned_gt_inds for res in sampling_results ] # compute the area ratio of gt areas inside the proposals and # the whole instance area_ratios = map(self._get_area_ratio, pos_proposals, pos_assigned_gt_inds, gt_masks) area_ratios = torch.cat(list(area_ratios)) assert mask_targets.size(0) == area_ratios.size(0) mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float() mask_pred_areas = mask_pred.sum((-1, -2)) # mask_pred and mask_targets are binary maps overlap_areas = (mask_pred * mask_targets).sum((-1, -2)) # compute the mask area of the whole instance gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7) mask_iou_targets = overlap_areas / ( mask_pred_areas + gt_full_areas - overlap_areas) return mask_iou_targets def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks): """Compute area ratio of the gt mask inside the proposal and the gt mask of the corresponding instance.""" num_pos = pos_proposals.size(0) if num_pos > 0: area_ratios = [] proposals_np = pos_proposals.cpu().numpy() pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() # compute mask areas of gt instances (batch processing for speedup) gt_instance_mask_area = gt_masks.areas for i in range(num_pos): gt_mask = gt_masks[pos_assigned_gt_inds[i]] # crop the gt mask inside the proposal bbox = proposals_np[i, :].astype(np.int32) gt_mask_in_proposal = gt_mask.crop(bbox) ratio = gt_mask_in_proposal.areas[0] / ( gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7) area_ratios.append(ratio) area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to( pos_proposals.device) else: area_ratios = pos_proposals.new_zeros((0, )) return area_ratios @force_fp32(apply_to=('mask_iou_pred', )) def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels): """Get the mask scores. mask_score = bbox_score * mask_iou """ inds = range(det_labels.size(0)) mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1] mask_scores = mask_scores.cpu().numpy() det_labels = det_labels.cpu().numpy() return [mask_scores[det_labels == i] for i in range(self.num_classes)]
7,382
39.125
79
py
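The target definition in get_targets() above is easier to see with numbers. A worked example (plain Python, values made up): the area of the whole gt instance is recovered from the in-box gt area and the area ratio, and the IoU target is then computed against that full area.

mask_pred_area = 120.0   # predicted foreground pixels inside the box
overlap_area = 100.0     # pixels where prediction and gt mask overlap
gt_area_in_box = 110.0   # gt mask pixels that fall inside the box
area_ratio = 0.8         # in-box gt area / gt area of the whole instance

gt_full_area = gt_area_in_box / (area_ratio + 1e-7)   # ~137.5
mask_iou_target = overlap_area / (
    mask_pred_area + gt_full_area - overlap_area)      # ~0.635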
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py
# Copyright (c) OpenMMLab. All rights reserved. from mmdet.models.builder import HEADS from mmdet.models.utils import ResLayer, SimplifiedBasicBlock from .fused_semantic_head import FusedSemanticHead @HEADS.register_module() class SCNetSemanticHead(FusedSemanticHead): """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_. Args: conv_to_res (bool, optional): if True, change the conv layers to ``SimplifiedBasicBlock``. """ def __init__(self, conv_to_res=True, **kwargs): super(SCNetSemanticHead, self).__init__(**kwargs) self.conv_to_res = conv_to_res if self.conv_to_res: num_res_blocks = self.num_convs // 2 self.convs = ResLayer( SimplifiedBasicBlock, self.in_channels, self.conv_out_channels, num_res_blocks, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) self.num_convs = num_res_blocks
998
33.448276
72
py
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/mask_heads/feature_relay_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.runner import BaseModule, auto_fp16 from mmdet.models.builder import HEADS @HEADS.register_module() class FeatureRelayHead(BaseModule): """Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_. Args: in_channels (int, optional): number of input channels. Default: 1024. out_conv_channels (int, optional): number of channels of the relayed feature map. Default: 256. roi_feat_size (int, optional): roi feat size at box head. Default: 7. scale_factor (int, optional): scale factor to match roi feat size at mask head. Default: 2. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels=1024, out_conv_channels=256, roi_feat_size=7, scale_factor=2, init_cfg=dict(type='Kaiming', layer='Linear')): super(FeatureRelayHead, self).__init__(init_cfg) assert isinstance(roi_feat_size, int) self.in_channels = in_channels self.out_conv_channels = out_conv_channels self.roi_feat_size = roi_feat_size self.out_channels = (roi_feat_size**2) * out_conv_channels self.scale_factor = scale_factor self.fp16_enabled = False self.fc = nn.Linear(self.in_channels, self.out_channels) self.upsample = nn.Upsample( scale_factor=scale_factor, mode='bilinear', align_corners=True) @auto_fp16() def forward(self, x): """Forward function.""" N, in_C = x.shape if N > 0: out_C = self.out_conv_channels out_HW = self.roi_feat_size x = self.fc(x) x = x.reshape(N, out_C, out_HW, out_HW) x = self.upsample(x) return x return None
1,930
34.759259
78
py
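A quick shape sketch for the relay head above (editorial addition; assumes torch and mmdet, with the defaults in_channels=1024, out_conv_channels=256, roi_feat_size=7, scale_factor=2).

import torch
from mmdet.models.roi_heads.mask_heads import FeatureRelayHead

relay = FeatureRelayHead()

box_feats = torch.rand(5, 1024)   # fc features coming from the box head
out = relay(box_feats)            # expected shape: (5, 256, 14, 14)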
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/mask_heads/global_context_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule, auto_fp16, force_fp32 from mmdet.models.builder import HEADS from mmdet.models.utils import ResLayer, SimplifiedBasicBlock @HEADS.register_module() class GlobalContextHead(BaseModule): """Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_. Args: num_convs (int, optional): number of convolutional layer in GlbCtxHead. Default: 4. in_channels (int, optional): number of input channels. Default: 256. conv_out_channels (int, optional): number of output channels before classification layer. Default: 256. num_classes (int, optional): number of classes. Default: 80. loss_weight (float, optional): global context loss weight. Default: 1. conv_cfg (dict, optional): config to init conv layer. Default: None. norm_cfg (dict, optional): config to init norm layer. Default: None. conv_to_res (bool, optional): if True, 2 convs will be grouped into 1 `SimplifiedBasicBlock` using a skip connection. Default: False. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, num_convs=4, in_channels=256, conv_out_channels=256, num_classes=80, loss_weight=1.0, conv_cfg=None, norm_cfg=None, conv_to_res=False, init_cfg=dict( type='Normal', std=0.01, override=dict(name='fc'))): super(GlobalContextHead, self).__init__(init_cfg) self.num_convs = num_convs self.in_channels = in_channels self.conv_out_channels = conv_out_channels self.num_classes = num_classes self.loss_weight = loss_weight self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.conv_to_res = conv_to_res self.fp16_enabled = False if self.conv_to_res: num_res_blocks = num_convs // 2 self.convs = ResLayer( SimplifiedBasicBlock, in_channels, self.conv_out_channels, num_res_blocks, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) self.num_convs = num_res_blocks else: self.convs = nn.ModuleList() for i in range(self.num_convs): in_channels = self.in_channels if i == 0 else conv_out_channels self.convs.append( ConvModule( in_channels, conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(conv_out_channels, num_classes) self.criterion = nn.BCEWithLogitsLoss() @auto_fp16() def forward(self, feats): """Forward function.""" x = feats[-1] for i in range(self.num_convs): x = self.convs[i](x) x = self.pool(x) # multi-class prediction mc_pred = x.reshape(x.size(0), -1) mc_pred = self.fc(mc_pred) return mc_pred, x @force_fp32(apply_to=('pred', )) def loss(self, pred, labels): """Loss function.""" labels = [lbl.unique() for lbl in labels] targets = pred.new_zeros(pred.size()) for i, label in enumerate(labels): targets[i, label] = 1.0 loss = self.loss_weight * self.criterion(pred, targets) return loss
3,774
36.009804
79
py
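The loss() in the file above is a multi-label BCE over the classes present in each image. The following minimal sketch (torch only, made-up labels) shows how the per-image targets are built before BCEWithLogitsLoss is applied.

import torch

num_classes = 80
pred = torch.zeros(2, num_classes)                       # logits from the head
labels = [torch.tensor([3, 3, 17]), torch.tensor([0])]   # gt labels per image

targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
    targets[i, label.unique()] = 1.0   # mark every class present in the image
loss = torch.nn.BCEWithLogitsLoss()(pred, targets)       # scaled by loss_weight in the head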
DSLA-DSLA
DSLA-DSLA/mmdet/models/roi_heads/mask_heads/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .coarse_mask_head import CoarseMaskHead from .dynamic_mask_head import DynamicMaskHead from .fcn_mask_head import FCNMaskHead from .feature_relay_head import FeatureRelayHead from .fused_semantic_head import FusedSemanticHead from .global_context_head import GlobalContextHead from .grid_head import GridHead from .htc_mask_head import HTCMaskHead from .mask_point_head import MaskPointHead from .maskiou_head import MaskIoUHead from .scnet_mask_head import SCNetMaskHead from .scnet_semantic_head import SCNetSemanticHead __all__ = [ 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', 'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead', 'DynamicMaskHead' ]
817
37.952381
70
py