column            type            min      max
repo              stringlengths   2        99
file              stringlengths   13       225
code              stringlengths   0        18.3M
file_length       int64           0        18.3M
avg_line_length   float64         0        1.36M
max_line_length   int64           0        4.26M
extension_type    stringclasses   1 value
ERD
ERD-main/mmdet/models/layers/bbox_nms.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple, Union import torch from mmcv.ops.nms import batched_nms from torch import Tensor from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import ConfigType def multiclass_nms( multi_bboxes: Tensor, multi_scores: Tensor, score_thr: float, nms_cfg: ConfigType, max_num: int = -1, score_factors: Optional[Tensor] = None, return_inds: bool = False, box_dim: int = 4 ) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]: """NMS for multi-class bboxes. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class), where the last column contains scores of the background class, but this will be ignored. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. nms_cfg (Union[:obj:`ConfigDict`, dict]): a dict that contains the arguments of nms operations. max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. score_factors (Tensor, optional): The factors multiplied to scores before applying NMS. Default to None. return_inds (bool, optional): Whether return the indices of kept bboxes. Default to False. box_dim (int): The dimension of boxes. Defaults to 4. Returns: Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]: (dets, labels, indices (optional)), tensors of shape (k, 5), (k), and (k). Dets are boxes with scores. Labels are 0-based. """ num_classes = multi_scores.size(1) - 1 # exclude background category if multi_bboxes.shape[1] > box_dim: bboxes = multi_bboxes.view(multi_scores.size(0), -1, box_dim) else: bboxes = multi_bboxes[:, None].expand( multi_scores.size(0), num_classes, box_dim) scores = multi_scores[:, :-1] labels = torch.arange(num_classes, dtype=torch.long, device=scores.device) labels = labels.view(1, -1).expand_as(scores) bboxes = bboxes.reshape(-1, box_dim) scores = scores.reshape(-1) labels = labels.reshape(-1) if not torch.onnx.is_in_onnx_export(): # NonZero not supported in TensorRT # remove low scoring boxes valid_mask = scores > score_thr # multiply score_factor after threshold to preserve more bboxes, improve # mAP by 1% for YOLOv3 if score_factors is not None: # expand the shape to match original shape of score score_factors = score_factors.view(-1, 1).expand( multi_scores.size(0), num_classes) score_factors = score_factors.reshape(-1) scores = scores * score_factors if not torch.onnx.is_in_onnx_export(): # NonZero not supported in TensorRT inds = valid_mask.nonzero(as_tuple=False).squeeze(1) bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds] else: # TensorRT NMS plugin has invalid output filled with -1 # add dummy data to make detection output correct. 
bboxes = torch.cat([bboxes, bboxes.new_zeros(1, box_dim)], dim=0) scores = torch.cat([scores, scores.new_zeros(1)], dim=0) labels = torch.cat([labels, labels.new_zeros(1)], dim=0) if bboxes.numel() == 0: if torch.onnx.is_in_onnx_export(): raise RuntimeError('[ONNX Error] Can not record NMS ' 'as it has not been executed this time') dets = torch.cat([bboxes, scores[:, None]], -1) if return_inds: return dets, labels, inds else: return dets, labels dets, keep = batched_nms(bboxes, scores, labels, nms_cfg) if max_num > 0: dets = dets[:max_num] keep = keep[:max_num] if return_inds: return dets, labels[keep], inds[keep] else: return dets, labels[keep] def fast_nms( multi_bboxes: Tensor, multi_scores: Tensor, multi_coeffs: Tensor, score_thr: float, iou_thr: float, top_k: int, max_num: int = -1 ) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]: """Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_. Fast NMS allows already-removed detections to suppress other detections so that every instance can be decided to be kept or discarded in parallel, which is not possible in traditional NMS. This relaxation allows us to implement Fast NMS entirely in standard GPU-accelerated matrix operations. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class+1), where the last column contains scores of the background class, but this will be ignored. multi_coeffs (Tensor): shape (n, #class*coeffs_dim). score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_thr (float): IoU threshold to be considered as conflicted. top_k (int): if there are more than top_k bboxes before NMS, only top top_k will be kept. max_num (int): if there are more than max_num bboxes after NMS, only top max_num will be kept. If -1, keep all the bboxes. Default: -1. Returns: Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]: (dets, labels, coefficients), tensors of shape (k, 5), (k, 1), and (k, coeffs_dim). Dets are boxes with scores. Labels are 0-based. """ scores = multi_scores[:, :-1].t() # [#class, n] scores, idx = scores.sort(1, descending=True) idx = idx[:, :top_k].contiguous() scores = scores[:, :top_k] # [#class, topk] num_classes, num_dets = idx.size() boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4) coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1) iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk] iou.triu_(diagonal=1) iou_max, _ = iou.max(dim=1) # Now just filter out the ones higher than the threshold keep = iou_max <= iou_thr # Second thresholding introduces 0.2 mAP gain at negligible time cost keep *= scores > score_thr # Assign each kept detection to its corresponding class classes = torch.arange( num_classes, device=boxes.device)[:, None].expand_as(keep) classes = classes[keep] boxes = boxes[keep] coeffs = coeffs[keep] scores = scores[keep] # Only keep the top max_num highest scores across all classes scores, idx = scores.sort(0, descending=True) if max_num > 0: idx = idx[:max_num] scores = scores[:max_num] classes = classes[idx] boxes = boxes[idx] coeffs = coeffs[idx] cls_dets = torch.cat([boxes, scores[:, None]], dim=1) return cls_dets, classes, coeffs
file_length: 6,987 | avg_line_length: 36.772973 | max_line_length: 78 | extension_type: py
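A minimal usage sketch for the file above (not part of the dataset record; it assumes mmdet and mmcv are installed and uses random placeholder boxes and scores): run multiclass_nms over 3 foreground classes and inspect the kept detections.

import torch
from mmdet.models.layers import multiclass_nms

num_boxes, num_classes = 100, 3
xy = torch.rand(num_boxes, 2) * 80
wh = torch.rand(num_boxes, 2) * 20 + 1
multi_bboxes = torch.cat([xy, xy + wh], dim=-1)         # (n, 4) shared boxes
multi_scores = torch.rand(num_boxes, num_classes + 1)   # last column is background
dets, labels = multiclass_nms(
    multi_bboxes,
    multi_scores,
    score_thr=0.05,
    nms_cfg=dict(type='nms', iou_threshold=0.5),
    max_num=100)
print(dets.shape, labels.shape)   # (k, 5) boxes with scores, (k,) 0-based labels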
ERD
ERD-main/mmdet/models/layers/res_layer.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional from mmcv.cnn import build_conv_layer, build_norm_layer from mmengine.model import BaseModule, Sequential from torch import Tensor from torch import nn as nn from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig class ResLayer(Sequential): """ResLayer to build ResNet style backbone. Args: block (nn.Module): block used to build ResLayer. inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Defaults to 1 avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottleneck. Defaults to False conv_cfg (dict): dictionary to construct and config conv layer. Defaults to None norm_cfg (dict): dictionary to construct and config norm layer. Defaults to dict(type='BN') downsample_first (bool): Downsample at the first block or last block. False for Hourglass, True for ResNet. Defaults to True """ def __init__(self, block: BaseModule, inplanes: int, planes: int, num_blocks: int, stride: int = 1, avg_down: bool = False, conv_cfg: OptConfigType = None, norm_cfg: ConfigType = dict(type='BN'), downsample_first: bool = True, **kwargs) -> None: self.block = block downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = [] conv_stride = stride if avg_down: conv_stride = 1 downsample.append( nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)) downsample.extend([ build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1] ]) downsample = nn.Sequential(*downsample) layers = [] if downsample_first: layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) inplanes = planes * block.expansion for _ in range(1, num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) else: # downsample_first=False is for HourglassModule for _ in range(num_blocks - 1): layers.append( block( inplanes=inplanes, planes=inplanes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs)) super().__init__(*layers) class SimplifiedBasicBlock(BaseModule): """Simplified version of original basic residual block. This is used in `SCNet <https://arxiv.org/abs/2012.10150>`_. - Norm layer is now optional - Last ReLU in forward function is removed """ expansion = 1 def __init__(self, inplanes: int, planes: int, stride: int = 1, dilation: int = 1, downsample: Optional[Sequential] = None, style: ConfigType = 'pytorch', with_cp: bool = False, conv_cfg: OptConfigType = None, norm_cfg: ConfigType = dict(type='BN'), dcn: OptConfigType = None, plugins: OptConfigType = None, init_cfg: OptMultiConfig = None) -> None: super().__init__(init_cfg=init_cfg) assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' assert not with_cp, 'Not implemented yet.' 
self.with_norm = norm_cfg is not None with_bias = True if norm_cfg is None else False self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=with_bias) if self.with_norm: self.norm1_name, norm1 = build_norm_layer( norm_cfg, planes, postfix=1) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( conv_cfg, planes, planes, 3, padding=1, bias=with_bias) if self.with_norm: self.norm2_name, norm2 = build_norm_layer( norm_cfg, planes, postfix=2) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.with_cp = with_cp @property def norm1(self) -> Optional[BaseModule]: """nn.Module: normalization layer after the first convolution layer""" return getattr(self, self.norm1_name) if self.with_norm else None @property def norm2(self) -> Optional[BaseModule]: """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) if self.with_norm else None def forward(self, x: Tensor) -> Tensor: """Forward function for SimplifiedBasicBlock.""" identity = x out = self.conv1(x) if self.with_norm: out = self.norm1(out) out = self.relu(out) out = self.conv2(out) if self.with_norm: out = self.norm2(out) if self.downsample is not None: identity = self.downsample(x) out += identity return out
file_length: 6,843 | avg_line_length: 33.918367 | max_line_length: 79 | extension_type: py
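A short usage sketch for ResLayer (an assumption, not part of the record; BasicBlock is imported from mmdet's ResNet backbone): build a stage of two blocks with a stride-2 downsample branch.

import torch
from mmdet.models.backbones.resnet import BasicBlock
from mmdet.models.layers import ResLayer

layer = ResLayer(BasicBlock, inplanes=64, planes=128, num_blocks=2, stride=2)
x = torch.rand(1, 64, 56, 56)
print(layer(x).shape)   # torch.Size([1, 128, 28, 28])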
ERD
ERD-main/mmdet/models/layers/brick_wrappers.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version

if torch.__version__ == 'parrots':
    TORCH_VERSION = torch.__version__
else:
    # torch.__version__ could be 1.3.1+cu92, we only need the first two
    # for comparison
    TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])


def adaptive_avg_pool2d(input, output_size):
    """Handle empty batch dimension to adaptive_avg_pool2d.

    Args:
        input (tensor): 4D tensor.
        output_size (int, tuple[int,int]): the target output size.
    """
    if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
        if isinstance(output_size, int):
            output_size = [output_size, output_size]
        output_size = [*input.shape[:2], *output_size]
        empty = NewEmptyTensorOp.apply(input, output_size)
        return empty
    else:
        return F.adaptive_avg_pool2d(input, output_size)


class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
    """Handle empty batch dimension to AdaptiveAvgPool2d."""

    def forward(self, x):
        # PyTorch 1.9 does not support empty tensor inference yet
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
            output_size = self.output_size
            if isinstance(output_size, int):
                output_size = [output_size, output_size]
            else:
                output_size = [
                    v if v is not None else d
                    for v, d in zip(output_size, x.size()[-2:])
                ]
            output_size = [*x.shape[:2], *output_size]
            empty = NewEmptyTensorOp.apply(x, output_size)
            return empty
        return super().forward(x)
file_length: 1,856 | avg_line_length: 34.711538 | max_line_length: 77 | extension_type: py
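A brief sketch of what the wrapper above is for (assumed usage, not part of the record): an input with an empty batch dimension still yields a correctly shaped empty output instead of erroring on older PyTorch versions.

import torch
from mmdet.models.layers import adaptive_avg_pool2d

x = torch.rand(2, 8, 16, 16)
print(adaptive_avg_pool2d(x, 7).shape)       # torch.Size([2, 8, 7, 7])

empty = torch.rand(0, 8, 16, 16)             # batch dimension of zero
print(adaptive_avg_pool2d(empty, 7).shape)   # torch.Size([0, 8, 7, 7])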
ERD
ERD-main/mmdet/models/layers/matrix_nms.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch


def mask_matrix_nms(masks,
                    labels,
                    scores,
                    filter_thr=-1,
                    nms_pre=-1,
                    max_num=-1,
                    kernel='gaussian',
                    sigma=2.0,
                    mask_area=None):
    """Matrix NMS for multi-class masks.

    Args:
        masks (Tensor): Has shape (num_instances, h, w)
        labels (Tensor): Labels of corresponding masks,
            has shape (num_instances,).
        scores (Tensor): Mask scores of corresponding masks,
            has shape (num_instances).
        filter_thr (float): Score threshold to filter the masks
            after matrix nms. Default: -1, which means do not
            use filter_thr.
        nms_pre (int): The max number of instances to do the matrix nms.
            Default: -1, which means do not use nms_pre.
        max_num (int, optional): If there are more than max_num masks after
            matrix, only top max_num will be kept. Default: -1, which means
            do not use max_num.
        kernel (str): 'linear' or 'gaussian'.
        sigma (float): std in gaussian method.
        mask_area (Tensor): The sum of seg_masks.

    Returns:
        tuple(Tensor): Processed mask results.

            - scores (Tensor): Updated scores, has shape (n,).
            - labels (Tensor): Remained labels, has shape (n,).
            - masks (Tensor): Remained masks, has shape (n, w, h).
            - keep_inds (Tensor): The indices number of
                the remaining mask in the input mask, has shape (n,).
    """
    assert len(labels) == len(masks) == len(scores)
    if len(labels) == 0:
        return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
            0, *masks.shape[-2:]), labels.new_zeros(0)
    if mask_area is None:
        mask_area = masks.sum((1, 2)).float()
    else:
        assert len(masks) == len(mask_area)

    # sort and keep top nms_pre
    scores, sort_inds = torch.sort(scores, descending=True)
    keep_inds = sort_inds
    if nms_pre > 0 and len(sort_inds) > nms_pre:
        sort_inds = sort_inds[:nms_pre]
        keep_inds = keep_inds[:nms_pre]
        scores = scores[:nms_pre]
    masks = masks[sort_inds]
    mask_area = mask_area[sort_inds]
    labels = labels[sort_inds]

    num_masks = len(labels)
    flatten_masks = masks.reshape(num_masks, -1).float()
    # inter.
    inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0))
    expanded_mask_area = mask_area.expand(num_masks, num_masks)
    # Upper triangle iou matrix.
    iou_matrix = (inter_matrix /
                  (expanded_mask_area + expanded_mask_area.transpose(1, 0) -
                   inter_matrix)).triu(diagonal=1)
    # label_specific matrix.
    expanded_labels = labels.expand(num_masks, num_masks)
    # Upper triangle label matrix.
    label_matrix = (expanded_labels == expanded_labels.transpose(
        1, 0)).triu(diagonal=1)

    # IoU compensation
    compensate_iou, _ = (iou_matrix * label_matrix).max(0)
    compensate_iou = compensate_iou.expand(num_masks,
                                           num_masks).transpose(1, 0)

    # IoU decay
    decay_iou = iou_matrix * label_matrix

    # Calculate the decay_coefficient
    if kernel == 'gaussian':
        decay_matrix = torch.exp(-1 * sigma * (decay_iou**2))
        compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2))
        decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)
    elif kernel == 'linear':
        decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
        decay_coefficient, _ = decay_matrix.min(0)
    else:
        raise NotImplementedError(
            f'{kernel} kernel is not supported in matrix nms!')

    # update the score.
    scores = scores * decay_coefficient

    if filter_thr > 0:
        keep = scores >= filter_thr
        keep_inds = keep_inds[keep]
        if not keep.any():
            return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
                0, *masks.shape[-2:]), labels.new_zeros(0)
        masks = masks[keep]
        scores = scores[keep]
        labels = labels[keep]

    # sort and keep top max_num
    scores, sort_inds = torch.sort(scores, descending=True)
    keep_inds = keep_inds[sort_inds]
    if max_num > 0 and len(sort_inds) > max_num:
        sort_inds = sort_inds[:max_num]
        keep_inds = keep_inds[:max_num]
        scores = scores[:max_num]
    masks = masks[sort_inds]
    labels = labels[sort_inds]

    return scores, labels, masks, keep_inds
file_length: 4,622 | avg_line_length: 36.893443 | max_line_length: 77 | extension_type: py
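A small usage sketch (an assumption, not part of the record): two overlapping masks of the same class, where Matrix NMS decays the lower-scoring duplicate below filter_thr while the other instances keep their scores.

import torch
from mmdet.models.layers import mask_matrix_nms

masks = torch.zeros(3, 32, 32, dtype=torch.bool)
masks[0, 4:20, 4:20] = True       # instance A
masks[1, 5:21, 5:21] = True       # near-duplicate of A, same class
masks[2, 24:30, 24:30] = True     # unrelated instance of another class
labels = torch.tensor([0, 0, 1])
scores = torch.tensor([0.9, 0.8, 0.7])

scores, labels, masks, keep_inds = mask_matrix_nms(
    masks, labels, scores, kernel='gaussian', sigma=2.0, filter_thr=0.5)
print(scores)     # the duplicate's score is decayed and filtered out
print(keep_inds)  # indices of the surviving masks in the input order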
ERD
ERD-main/mmdet/models/layers/positional_encoding.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

import torch
import torch.nn as nn
from mmengine.model import BaseModule
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptMultiConfig


@MODELS.register_module()
class SinePositionalEncoding(BaseModule):
    """Position encoding with sine and cosine functions.

    See `End-to-End Object Detection with Transformers
    <https://arxiv.org/pdf/2005.12872>`_ for details.

    Args:
        num_feats (int): The feature dimension for each position
            along x-axis or y-axis. Note the final returned dimension
            for each position is 2 times of this value.
        temperature (int, optional): The temperature used for scaling
            the position embedding. Defaults to 10000.
        normalize (bool, optional): Whether to normalize the position
            embedding. Defaults to False.
        scale (float, optional): A scale factor that scales the position
            embedding. The scale will be used only when `normalize` is True.
            Defaults to 2*pi.
        eps (float, optional): A value added to the denominator for
            numerical stability. Defaults to 1e-6.
        offset (float): offset add to embed when do the normalization.
            Defaults to 0.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None
    """

    def __init__(self,
                 num_feats: int,
                 temperature: int = 10000,
                 normalize: bool = False,
                 scale: float = 2 * math.pi,
                 eps: float = 1e-6,
                 offset: float = 0.,
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg=init_cfg)
        if normalize:
            assert isinstance(scale, (float, int)), 'when normalize is set,' \
                'scale should be provided and in float or int type, ' \
                f'found {type(scale)}'
        self.num_feats = num_feats
        self.temperature = temperature
        self.normalize = normalize
        self.scale = scale
        self.eps = eps
        self.offset = offset

    def forward(self, mask: Tensor) -> Tensor:
        """Forward function for `SinePositionalEncoding`.

        Args:
            mask (Tensor): ByteTensor mask. Non-zero values representing
                ignored positions, while zero values means valid positions
                for this image. Shape [bs, h, w].

        Returns:
            pos (Tensor): Returned position embedding with shape
                [bs, num_feats*2, h, w].
        """
        # For convenience of exporting to ONNX, it's required to convert
        # `masks` from bool to int.
        mask = mask.to(torch.int)
        not_mask = 1 - mask  # logical_not
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            y_embed = (y_embed + self.offset) / \
                      (y_embed[:, -1:, :] + self.eps) * self.scale
            x_embed = (x_embed + self.offset) / \
                      (x_embed[:, :, -1:] + self.eps) * self.scale
        dim_t = torch.arange(
            self.num_feats, dtype=torch.float32, device=mask.device)
        dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # use `view` instead of `flatten` for dynamically exporting to ONNX
        B, H, W = mask.size()
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()),
            dim=4).view(B, H, W, -1)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()),
            dim=4).view(B, H, W, -1)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos

    def __repr__(self) -> str:
        """str: a string that describes the module"""
        repr_str = self.__class__.__name__
        repr_str += f'(num_feats={self.num_feats}, '
        repr_str += f'temperature={self.temperature}, '
        repr_str += f'normalize={self.normalize}, '
        repr_str += f'scale={self.scale}, '
        repr_str += f'eps={self.eps})'
        return repr_str


@MODELS.register_module()
class LearnedPositionalEncoding(BaseModule):
    """Position embedding with learnable embedding weights.

    Args:
        num_feats (int): The feature dimension for each position
            along x-axis or y-axis. The final returned dimension for
            each position is 2 times of this value.
        row_num_embed (int, optional): The dictionary size of row embeddings.
            Defaults to 50.
        col_num_embed (int, optional): The dictionary size of col embeddings.
            Defaults to 50.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(
        self,
        num_feats: int,
        row_num_embed: int = 50,
        col_num_embed: int = 50,
        init_cfg: MultiConfig = dict(type='Uniform', layer='Embedding')
    ) -> None:
        super().__init__(init_cfg=init_cfg)
        self.row_embed = nn.Embedding(row_num_embed, num_feats)
        self.col_embed = nn.Embedding(col_num_embed, num_feats)
        self.num_feats = num_feats
        self.row_num_embed = row_num_embed
        self.col_num_embed = col_num_embed

    def forward(self, mask: Tensor) -> Tensor:
        """Forward function for `LearnedPositionalEncoding`.

        Args:
            mask (Tensor): ByteTensor mask. Non-zero values representing
                ignored positions, while zero values means valid positions
                for this image. Shape [bs, h, w].

        Returns:
            pos (Tensor): Returned position embedding with shape
                [bs, num_feats*2, h, w].
        """
        h, w = mask.shape[-2:]
        x = torch.arange(w, device=mask.device)
        y = torch.arange(h, device=mask.device)
        x_embed = self.col_embed(x)
        y_embed = self.row_embed(y)
        pos = torch.cat(
            (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat(
                1, w, 1)),
            dim=-1).permute(2, 0,
                            1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)
        return pos

    def __repr__(self) -> str:
        """str: a string that describes the module"""
        repr_str = self.__class__.__name__
        repr_str += f'(num_feats={self.num_feats}, '
        repr_str += f'row_num_embed={self.row_num_embed}, '
        repr_str += f'col_num_embed={self.col_num_embed})'
        return repr_str
file_length: 6,710 | avg_line_length: 38.710059 | max_line_length: 79 | extension_type: py
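A quick shape check for the sine encoding above (assumed usage, not part of the record): an all-valid mask of shape (2, 4, 6) yields an embedding of shape (bs, num_feats*2, h, w).

import torch
from mmdet.models.layers import SinePositionalEncoding

pos_enc = SinePositionalEncoding(num_feats=128, normalize=True)
mask = torch.zeros(2, 4, 6, dtype=torch.uint8)   # 0 marks valid positions
pos = pos_enc(mask)
print(pos.shape)   # torch.Size([2, 256, 4, 6])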
ERD
ERD-main/mmdet/models/layers/inverted_residual.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import ConvModule from mmcv.cnn.bricks import DropPath from mmengine.model import BaseModule from .se_layer import SELayer class InvertedResidual(BaseModule): """Inverted Residual Block. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. mid_channels (int): The input channels of the depthwise convolution. kernel_size (int): The kernel size of the depthwise convolution. Default: 3. stride (int): The stride of the depthwise convolution. Default: 1. se_cfg (dict): Config dict for se layer. Default: None, which means no se layer. with_expand_conv (bool): Use expand conv or not. If set False, mid_channels must be the same with in_channels. Default: True. conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU'). drop_path_rate (float): stochastic depth rate. Defaults to 0. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Returns: Tensor: The output tensor. """ def __init__(self, in_channels, out_channels, mid_channels, kernel_size=3, stride=1, se_cfg=None, with_expand_conv=True, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), drop_path_rate=0., with_cp=False, init_cfg=None): super(InvertedResidual, self).__init__(init_cfg) self.with_res_shortcut = (stride == 1 and in_channels == out_channels) assert stride in [1, 2], f'stride must in [1, 2]. ' \ f'But received {stride}.' self.with_cp = with_cp self.drop_path = DropPath( drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.with_se = se_cfg is not None self.with_expand_conv = with_expand_conv if self.with_se: assert isinstance(se_cfg, dict) if not self.with_expand_conv: assert mid_channels == in_channels if self.with_expand_conv: self.expand_conv = ConvModule( in_channels=in_channels, out_channels=mid_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.depthwise_conv = ConvModule( in_channels=mid_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=mid_channels, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) if self.with_se: self.se = SELayer(**se_cfg) self.linear_conv = ConvModule( in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) def forward(self, x): def _inner_forward(x): out = x if self.with_expand_conv: out = self.expand_conv(out) out = self.depthwise_conv(out) if self.with_se: out = self.se(out) out = self.linear_conv(out) if self.with_res_shortcut: return x + self.drop_path(out) else: return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) return out
file_length: 4,383 | avg_line_length: 32.465649 | max_line_length: 78 | extension_type: py
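A minimal construction sketch for the block above (an assumption, not part of the record): stride 1 with equal input and output channels enables the residual shortcut around the expand, depthwise, and linear convolutions.

import torch
from mmdet.models.layers import InvertedResidual

block = InvertedResidual(
    in_channels=32, out_channels=32, mid_channels=128, kernel_size=3, stride=1)
x = torch.rand(1, 32, 56, 56)
print(block(x).shape)   # torch.Size([1, 32, 56, 56])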
ERD
ERD-main/mmdet/models/layers/activations.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmengine.utils import digit_version

from mmdet.registry import MODELS

if digit_version(torch.__version__) >= digit_version('1.7.0'):
    from torch.nn import SiLU
else:

    class SiLU(nn.Module):
        """Sigmoid Weighted Linear Unit."""

        def __init__(self, inplace=True):
            super().__init__()

        def forward(self, inputs) -> torch.Tensor:
            return inputs * torch.sigmoid(inputs)


MODELS.register_module(module=SiLU, name='SiLU')
file_length: 557 | avg_line_length: 23.26087 | max_line_length: 62 | extension_type: py
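A one-line sanity check (assumed, not part of the record): both the native PyTorch SiLU and the fallback above compute x * sigmoid(x).

import torch
from mmdet.models.layers import SiLU

x = torch.randn(4)
print(torch.allclose(SiLU()(x), x * torch.sigmoid(x)))   # True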
ERD
ERD-main/mmdet/models/layers/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
                                  SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer

# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
                          ConditionalAttention,
                          ConditionalDetrTransformerDecoder,
                          ConditionalDetrTransformerDecoderLayer,
                          DABDetrTransformerDecoder,
                          DABDetrTransformerDecoderLayer,
                          DABDetrTransformerEncoder,
                          DeformableDetrTransformerDecoder,
                          DeformableDetrTransformerDecoderLayer,
                          DeformableDetrTransformerEncoder,
                          DeformableDetrTransformerEncoderLayer,
                          DetrTransformerDecoder, DetrTransformerDecoderLayer,
                          DetrTransformerEncoder, DetrTransformerEncoderLayer,
                          DinoTransformerDecoder, DynamicConv,
                          Mask2FormerTransformerDecoder,
                          Mask2FormerTransformerDecoderLayer,
                          Mask2FormerTransformerEncoder, PatchEmbed,
                          PatchMerging, coordinate_to_encoding,
                          inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable

__all__ = [
    'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
    'PixelDecoder', 'TransformerEncoderPixelDecoder',
    'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
    'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
    'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d',
    'InvertedResidual', 'SELayer', 'ConvUpsample', 'CSPLayer',
    'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
    'nlc_to_nchw', 'DyReLU', 'ExpMomentumEMA', 'inverse_sigmoid',
    'ChannelAttention', 'SiLU', 'MLP', 'DetrTransformerEncoderLayer',
    'DetrTransformerDecoderLayer', 'DetrTransformerEncoder',
    'DetrTransformerDecoder', 'DeformableDetrTransformerEncoder',
    'DeformableDetrTransformerDecoder',
    'DeformableDetrTransformerEncoderLayer',
    'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
    'coordinate_to_encoding', 'ConditionalAttention',
    'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
    'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
    'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
    'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
    'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
]
file_length: 3,342 | avg_line_length: 52.919355 | max_line_length: 79 | extension_type: py
ERD
ERD-main/mmdet/models/layers/ema.py
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional

import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor

from mmdet.registry import MODELS


@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
    """Exponential moving average (EMA) with exponential momentum strategy,
    which is used in YOLOX.

    Args:
        model (nn.Module): The model to be averaged.
        momentum (float): The momentum used for updating ema parameter.
            Ema's parameter are updated with the formula:
            `averaged_param = (1-momentum) * averaged_param +
            momentum * source_param`. Defaults to 0.0002.
        gamma (int): Use a larger momentum early in training and gradually
            annealing to a smaller value to update the ema model smoothly.
            The momentum is calculated as
            `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
            Defaults to 2000.
        interval (int): Interval between two updates. Defaults to 1.
        device (torch.device, optional): If provided, the averaged model will
            be stored on the :attr:`device`. Defaults to None.
        update_buffers (bool): if True, it will compute running averages for
            both the parameters and the buffers of the model. Defaults to
            False.
    """

    def __init__(self,
                 model: nn.Module,
                 momentum: float = 0.0002,
                 gamma: int = 2000,
                 interval=1,
                 device: Optional[torch.device] = None,
                 update_buffers: bool = False) -> None:
        super().__init__(
            model=model,
            momentum=momentum,
            interval=interval,
            device=device,
            update_buffers=update_buffers)
        assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
        self.gamma = gamma

    def avg_func(self, averaged_param: Tensor, source_param: Tensor,
                 steps: int) -> None:
        """Compute the moving average of the parameters using the exponential
        momentum strategy.

        Args:
            averaged_param (Tensor): The averaged parameters.
            source_param (Tensor): The source parameters.
            steps (int): The number of times the parameters have been updated.
        """
        momentum = (1 - self.momentum) * math.exp(
            -float(1 + steps) / self.gamma) + self.momentum
        averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)
file_length: 2,614 | avg_line_length: 38.029851 | max_line_length: 78 | extension_type: py
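A small numeric sketch of the momentum schedule used in avg_func above (pure arithmetic, no mmdet required): early in training the effective momentum is close to 1, so the EMA tracks the source parameters closely, and it anneals toward the base value of 0.0002 as steps grow.

import math

base_momentum, gamma = 0.0002, 2000
for steps in (0, 500, 2000, 10000):
    momentum = (1 - base_momentum) * math.exp(
        -float(1 + steps) / gamma) + base_momentum
    print(steps, round(momentum, 4))
# steps=0 -> ~1.0, 500 -> ~0.78, 2000 -> ~0.37, 10000 -> ~0.007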
ERD
ERD-main/mmdet/models/layers/transformer/dino_layers.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from typing import Tuple, Union import torch from mmengine.model import BaseModule from torch import Tensor, nn from mmdet.structures import SampleList from mmdet.structures.bbox import bbox_xyxy_to_cxcywh from mmdet.utils import OptConfigType from .deformable_detr_layers import DeformableDetrTransformerDecoder from .utils import MLP, coordinate_to_encoding, inverse_sigmoid class DinoTransformerDecoder(DeformableDetrTransformerDecoder): """Transformer encoder of DINO.""" def _init_layers(self) -> None: """Initialize decoder layers.""" super()._init_layers() self.ref_point_head = MLP(self.embed_dims * 2, self.embed_dims, self.embed_dims, 2) self.norm = nn.LayerNorm(self.embed_dims) def forward(self, query: Tensor, value: Tensor, key_padding_mask: Tensor, self_attn_mask: Tensor, reference_points: Tensor, spatial_shapes: Tensor, level_start_index: Tensor, valid_ratios: Tensor, reg_branches: nn.ModuleList, **kwargs) -> Tensor: """Forward function of Transformer encoder. Args: query (Tensor): The input query, has shape (num_queries, bs, dim). value (Tensor): The input values, has shape (num_value, bs, dim). key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` input. ByteTensor, has shape (num_queries, bs). self_attn_mask (Tensor): The attention mask to prevent information leakage from different denoising groups and matching parts, has shape (num_queries_total, num_queries_total). It is `None` when `self.training` is `False`. reference_points (Tensor): The initial reference, has shape (bs, num_queries, 4) with the last dimension arranged as (cx, cy, w, h). spatial_shapes (Tensor): Spatial shapes of features in all levels, has shape (num_levels, 2), last dimension represents (h, w). level_start_index (Tensor): The start index of each level. A tensor has shape (num_levels, ) and can be represented as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. valid_ratios (Tensor): The ratios of the valid width and the valid height relative to the width and the height of features in all levels, has shape (bs, num_levels, 2). reg_branches: (obj:`nn.ModuleList`): Used for refining the regression results. 
Returns: Tensor: Output queries of Transformer encoder, which is also called 'encoder output embeddings' or 'memory', has shape (num_queries, bs, dim) """ intermediate = [] intermediate_reference_points = [reference_points] for lid, layer in enumerate(self.layers): if reference_points.shape[-1] == 4: reference_points_input = \ reference_points[:, :, None] * torch.cat( [valid_ratios, valid_ratios], -1)[:, None] else: assert reference_points.shape[-1] == 2 reference_points_input = \ reference_points[:, :, None] * valid_ratios[:, None] query_sine_embed = coordinate_to_encoding( reference_points_input[:, :, 0, :]) query_pos = self.ref_point_head(query_sine_embed) query = layer( query, query_pos=query_pos, value=value, key_padding_mask=key_padding_mask, self_attn_mask=self_attn_mask, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, reference_points=reference_points_input, **kwargs) if reg_branches is not None: tmp = reg_branches[lid](query) assert reference_points.shape[-1] == 4 new_reference_points = tmp + inverse_sigmoid( reference_points, eps=1e-3) new_reference_points = new_reference_points.sigmoid() reference_points = new_reference_points.detach() if self.return_intermediate: intermediate.append(self.norm(query)) intermediate_reference_points.append(new_reference_points) # NOTE this is for the "Look Forward Twice" module, # in the DeformDETR, reference_points was appended. if self.return_intermediate: return torch.stack(intermediate), torch.stack( intermediate_reference_points) return query, reference_points class CdnQueryGenerator(BaseModule): """Implement query generator of the Contrastive denoising (CDN) proposed in `DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection <https://arxiv.org/abs/2203.03605>`_ Code is modified from the `official github repo <https://github.com/IDEA-Research/DINO>`_. Args: num_classes (int): Number of object classes. embed_dims (int): The embedding dimensions of the generated queries. num_matching_queries (int): The queries number of the matching part. Used for generating dn_mask. label_noise_scale (float): The scale of label noise, defaults to 0.5. box_noise_scale (float): The scale of box noise, defaults to 1.0. group_cfg (:obj:`ConfigDict` or dict, optional): The config of the denoising queries grouping, includes `dynamic`, `num_dn_queries`, and `num_groups`. Two grouping strategies, 'static dn groups' and 'dynamic dn groups', are supported. When `dynamic` is `False`, the `num_groups` should be set, and the number of denoising query groups will always be `num_groups`. When `dynamic` is `True`, the `num_dn_queries` should be set, and the group number will be dynamic to ensure that the denoising queries number will not exceed `num_dn_queries` to prevent large fluctuations of memory. Defaults to `None`. 
""" def __init__(self, num_classes: int, embed_dims: int, num_matching_queries: int, label_noise_scale: float = 0.5, box_noise_scale: float = 1.0, group_cfg: OptConfigType = None) -> None: super().__init__() self.num_classes = num_classes self.embed_dims = embed_dims self.num_matching_queries = num_matching_queries self.label_noise_scale = label_noise_scale self.box_noise_scale = box_noise_scale # prepare grouping strategy group_cfg = {} if group_cfg is None else group_cfg self.dynamic_dn_groups = group_cfg.get('dynamic', True) if self.dynamic_dn_groups: if 'num_dn_queries' not in group_cfg: warnings.warn("'num_dn_queries' should be set when using " 'dynamic dn groups, use 100 as default.') self.num_dn_queries = group_cfg.get('num_dn_queries', 100) assert isinstance(self.num_dn_queries, int), \ f'Expected the num_dn_queries to have type int, but got ' \ f'{self.num_dn_queries}({type(self.num_dn_queries)}). ' else: assert 'num_groups' in group_cfg, \ 'num_groups should be set when using static dn groups' self.num_groups = group_cfg['num_groups'] assert isinstance(self.num_groups, int), \ f'Expected the num_groups to have type int, but got ' \ f'{self.num_groups}({type(self.num_groups)}). ' # NOTE The original repo of DINO set the num_embeddings 92 for coco, # 91 (0~90) of which represents target classes and the 92 (91) # indicates `Unknown` class. However, the embedding of `unknown` class # is not used in the original DINO. # TODO: num_classes + 1 or num_classes ? self.label_embedding = nn.Embedding(self.num_classes, self.embed_dims) def __call__(self, batch_data_samples: SampleList) -> tuple: """Generate contrastive denoising (cdn) queries with ground truth. Descriptions of the Number Values in code and comments: - num_target_total: the total target number of the input batch samples. - max_num_target: the max target number of the input batch samples. - num_noisy_targets: the total targets number after adding noise, i.e., num_target_total * num_groups * 2. - num_denoising_queries: the length of the output batched queries, i.e., max_num_target * num_groups * 2. NOTE The format of input bboxes in batch_data_samples is unnormalized (x, y, x, y), and the output bbox queries are embedded by normalized (cx, cy, w, h) format bboxes going through inverse_sigmoid. Args: batch_data_samples (list[:obj:`DetDataSample`]): List of the batch data samples, each includes `gt_instance` which has attributes `bboxes` and `labels`. The `bboxes` has unnormalized coordinate format (x, y, x, y). Returns: tuple: The outputs of the dn query generator. - dn_label_query (Tensor): The output content queries for denoising part, has shape (bs, num_denoising_queries, dim), where `num_denoising_queries = max_num_target * num_groups * 2`. - dn_bbox_query (Tensor): The output reference bboxes as positions of queries for denoising part, which are embedded by normalized (cx, cy, w, h) format bboxes going through inverse_sigmoid, has shape (bs, num_denoising_queries, 4) with the last dimension arranged as (cx, cy, w, h). - attn_mask (Tensor): The attention mask to prevent information leakage from different denoising groups and matching parts, will be used as `self_attn_mask` of the `decoder`, has shape (num_queries_total, num_queries_total), where `num_queries_total` is the sum of `num_denoising_queries` and `num_matching_queries`. - dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. 
It will be used for split outputs of denoising and matching parts and loss calculation. """ # normalize bbox and collate ground truth (gt) gt_labels_list = [] gt_bboxes_list = [] for sample in batch_data_samples: img_h, img_w = sample.img_shape bboxes = sample.gt_instances.bboxes factor = bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) bboxes_normalized = bboxes / factor gt_bboxes_list.append(bboxes_normalized) gt_labels_list.append(sample.gt_instances.labels) gt_labels = torch.cat(gt_labels_list) # (num_target_total, 4) gt_bboxes = torch.cat(gt_bboxes_list) num_target_list = [len(bboxes) for bboxes in gt_bboxes_list] max_num_target = max(num_target_list) num_groups = self.get_num_groups(max_num_target) dn_label_query = self.generate_dn_label_query(gt_labels, num_groups) dn_bbox_query = self.generate_dn_bbox_query(gt_bboxes, num_groups) # The `batch_idx` saves the batch index of the corresponding sample # for each target, has shape (num_target_total). batch_idx = torch.cat([ torch.full_like(t.long(), i) for i, t in enumerate(gt_labels_list) ]) dn_label_query, dn_bbox_query = self.collate_dn_queries( dn_label_query, dn_bbox_query, batch_idx, len(batch_data_samples), num_groups) attn_mask = self.generate_dn_mask( max_num_target, num_groups, device=dn_label_query.device) dn_meta = dict( num_denoising_queries=int(max_num_target * 2 * num_groups), num_denoising_groups=num_groups) return dn_label_query, dn_bbox_query, attn_mask, dn_meta def get_num_groups(self, max_num_target: int = None) -> int: """Calculate denoising query groups number. Two grouping strategies, 'static dn groups' and 'dynamic dn groups', are supported. When `self.dynamic_dn_groups` is `False`, the number of denoising query groups will always be `self.num_groups`. When `self.dynamic_dn_groups` is `True`, the group number will be dynamic, ensuring the denoising queries number will not exceed `self.num_dn_queries` to prevent large fluctuations of memory. NOTE The `num_group` is shared for different samples in a batch. When the target numbers in the samples varies, the denoising queries of the samples containing fewer targets are padded to the max length. Args: max_num_target (int, optional): The max target number of the batch samples. It will only be used when `self.dynamic_dn_groups` is `True`. Defaults to `None`. Returns: int: The denoising group number of the current batch. """ if self.dynamic_dn_groups: assert max_num_target is not None, \ 'group_queries should be provided when using ' \ 'dynamic dn groups' if max_num_target == 0: num_groups = 1 else: num_groups = self.num_dn_queries // max_num_target else: num_groups = self.num_groups if num_groups < 1: num_groups = 1 return int(num_groups) def generate_dn_label_query(self, gt_labels: Tensor, num_groups: int) -> Tensor: """Generate noisy labels and their query embeddings. The strategy for generating noisy labels is: Randomly choose labels of `self.label_noise_scale * 0.5` proportion and override each of them with a random object category label. NOTE Not add noise to all labels. Besides, the `self.label_noise_scale * 0.5` arg is the ratio of the chosen positions, which is higher than the actual proportion of noisy labels, because the labels to override may be correct. And the gap becomes larger as the number of target categories decreases. The users should notice this and modify the scale arg or the corresponding logic according to specific dataset. 
Args: gt_labels (Tensor): The concatenated gt labels of all samples in the batch, has shape (num_target_total, ) where `num_target_total = sum(num_target_list)`. num_groups (int): The number of denoising query groups. Returns: Tensor: The query embeddings of noisy labels, has shape (num_noisy_targets, embed_dims), where `num_noisy_targets = num_target_total * num_groups * 2`. """ assert self.label_noise_scale > 0 gt_labels_expand = gt_labels.repeat(2 * num_groups, 1).view(-1) # Note `* 2` # noqa p = torch.rand_like(gt_labels_expand.float()) chosen_indice = torch.nonzero(p < (self.label_noise_scale * 0.5)).view( -1) # Note `* 0.5` new_labels = torch.randint_like(chosen_indice, 0, self.num_classes) noisy_labels_expand = gt_labels_expand.scatter(0, chosen_indice, new_labels) dn_label_query = self.label_embedding(noisy_labels_expand) return dn_label_query def generate_dn_bbox_query(self, gt_bboxes: Tensor, num_groups: int) -> Tensor: """Generate noisy bboxes and their query embeddings. The strategy for generating noisy bboxes is as follow: .. code:: text +--------------------+ | negative | | +----------+ | | | positive | | | | +-----|----+------------+ | | | | | | | +----+-----+ | | | | | | +---------+----------+ | | | | gt bbox | | | | +---------+----------+ | | | | | | +----+-----+ | | | | | | | +-------------|--- +----+ | | | | positive | | | +----------+ | | negative | +--------------------+ The random noise is added to the top-left and down-right point positions, hence, normalized (x, y, x, y) format of bboxes are required. The noisy bboxes of positive queries have the points both within the inner square, while those of negative queries have the points both between the inner and outer squares. Besides, the length of outer square is twice as long as that of the inner square, i.e., self.box_noise_scale * w_or_h / 2. NOTE The noise is added to all the bboxes. Moreover, there is still unconsidered case when one point is within the positive square and the others is between the inner and outer squares. Args: gt_bboxes (Tensor): The concatenated gt bboxes of all samples in the batch, has shape (num_target_total, 4) with the last dimension arranged as (cx, cy, w, h) where `num_target_total = sum(num_target_list)`. num_groups (int): The number of denoising query groups. Returns: Tensor: The output noisy bboxes, which are embedded by normalized (cx, cy, w, h) format bboxes going through inverse_sigmoid, has shape (num_noisy_targets, 4) with the last dimension arranged as (cx, cy, w, h), where `num_noisy_targets = num_target_total * num_groups * 2`. """ assert self.box_noise_scale > 0 device = gt_bboxes.device # expand gt_bboxes as groups gt_bboxes_expand = gt_bboxes.repeat(2 * num_groups, 1) # xyxy # obtain index of negative queries in gt_bboxes_expand positive_idx = torch.arange( len(gt_bboxes), dtype=torch.long, device=device) positive_idx = positive_idx.unsqueeze(0).repeat(num_groups, 1) positive_idx += 2 * len(gt_bboxes) * torch.arange( num_groups, dtype=torch.long, device=device)[:, None] positive_idx = positive_idx.flatten() negative_idx = positive_idx + len(gt_bboxes) # determine the sign of each element in the random part of the added # noise to be positive or negative randomly. 
rand_sign = torch.randint_like( gt_bboxes_expand, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0 # [low, high), 1 or -1, randomly # calculate the random part of the added noise rand_part = torch.rand_like(gt_bboxes_expand) # [0, 1) rand_part[negative_idx] += 1.0 # pos: [0, 1); neg: [1, 2) rand_part *= rand_sign # pos: (-1, 1); neg: (-2, -1] U [1, 2) # add noise to the bboxes bboxes_whwh = bbox_xyxy_to_cxcywh(gt_bboxes_expand)[:, 2:].repeat(1, 2) noisy_bboxes_expand = gt_bboxes_expand + torch.mul( rand_part, bboxes_whwh) * self.box_noise_scale / 2 # xyxy noisy_bboxes_expand = noisy_bboxes_expand.clamp(min=0.0, max=1.0) noisy_bboxes_expand = bbox_xyxy_to_cxcywh(noisy_bboxes_expand) dn_bbox_query = inverse_sigmoid(noisy_bboxes_expand, eps=1e-3) return dn_bbox_query def collate_dn_queries(self, input_label_query: Tensor, input_bbox_query: Tensor, batch_idx: Tensor, batch_size: int, num_groups: int) -> Tuple[Tensor]: """Collate generated queries to obtain batched dn queries. The strategy for query collation is as follow: .. code:: text input_queries (num_target_total, query_dim) P_A1 P_B1 P_B2 N_A1 N_B1 N_B2 P'A1 P'B1 P'B2 N'A1 N'B1 N'B2 |________ group1 ________| |________ group2 ________| | V P_A1 Pad0 N_A1 Pad0 P'A1 Pad0 N'A1 Pad0 P_B1 P_B2 N_B1 N_B2 P'B1 P'B2 N'B1 N'B2 |____ group1 ____| |____ group2 ____| batched_queries (batch_size, max_num_target, query_dim) where query_dim is 4 for bbox and self.embed_dims for label. Notation: _-group 1; '-group 2; A-Sample1(has 1 target); B-sample2(has 2 targets) Args: input_label_query (Tensor): The generated label queries of all targets, has shape (num_target_total, embed_dims) where `num_target_total = sum(num_target_list)`. input_bbox_query (Tensor): The generated bbox queries of all targets, has shape (num_target_total, 4) with the last dimension arranged as (cx, cy, w, h). batch_idx (Tensor): The batch index of the corresponding sample for each target, has shape (num_target_total). batch_size (int): The size of the input batch. num_groups (int): The number of denoising query groups. Returns: tuple[Tensor]: Output batched label and bbox queries. - batched_label_query (Tensor): The output batched label queries, has shape (batch_size, max_num_target, embed_dims). - batched_bbox_query (Tensor): The output batched bbox queries, has shape (batch_size, max_num_target, 4) with the last dimension arranged as (cx, cy, w, h). """ device = input_label_query.device num_target_list = [ torch.sum(batch_idx == idx) for idx in range(batch_size) ] max_num_target = max(num_target_list) num_denoising_queries = int(max_num_target * 2 * num_groups) map_query_index = torch.cat([ torch.arange(num_target, device=device) for num_target in num_target_list ]) map_query_index = torch.cat([ map_query_index + max_num_target * i for i in range(2 * num_groups) ]).long() batch_idx_expand = batch_idx.repeat(2 * num_groups, 1).view(-1) mapper = (batch_idx_expand, map_query_index) batched_label_query = torch.zeros( batch_size, num_denoising_queries, self.embed_dims, device=device) batched_bbox_query = torch.zeros( batch_size, num_denoising_queries, 4, device=device) batched_label_query[mapper] = input_label_query batched_bbox_query[mapper] = input_bbox_query return batched_label_query, batched_bbox_query def generate_dn_mask(self, max_num_target: int, num_groups: int, device: Union[torch.device, str]) -> Tensor: """Generate attention mask to prevent information leakage from different denoising groups and matching parts. .. 
code:: text 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 max_num_target |_| |_________| num_matching_queries |_____________| num_denoising_queries 1 -> True (Masked), means 'can not see'. 0 -> False (UnMasked), means 'can see'. Args: max_num_target (int): The max target number of the input batch samples. num_groups (int): The number of denoising query groups. device (obj:`device` or str): The device of generated mask. Returns: Tensor: The attention mask to prevent information leakage from different denoising groups and matching parts, will be used as `self_attn_mask` of the `decoder`, has shape (num_queries_total, num_queries_total), where `num_queries_total` is the sum of `num_denoising_queries` and `num_matching_queries`. """ num_denoising_queries = int(max_num_target * 2 * num_groups) num_queries_total = num_denoising_queries + self.num_matching_queries attn_mask = torch.zeros( num_queries_total, num_queries_total, device=device, dtype=torch.bool) # Make the matching part cannot see the denoising groups attn_mask[num_denoising_queries:, :num_denoising_queries] = True # Make the denoising groups cannot see each other for i in range(num_groups): # Mask rows of one group per step. row_scope = slice(max_num_target * 2 * i, max_num_target * 2 * (i + 1)) left_scope = slice(max_num_target * 2 * i) right_scope = slice(max_num_target * 2 * (i + 1), num_denoising_queries) attn_mask[row_scope, right_scope] = True attn_mask[row_scope, left_scope] = True return attn_mask
file_length: 26,710 | avg_line_length: 47.301989 | max_line_length: 79 | extension_type: py
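A small sketch of the denoising attention mask described above (assumed usage, not part of the record): 2 targets and 2 groups give 8 denoising queries; with 4 matching queries the mask is 12x12, and True entries block attention across denoising groups and from the matching part into the denoising part.

import torch
from mmdet.models.layers import CdnQueryGenerator

gen = CdnQueryGenerator(
    num_classes=80,
    embed_dims=256,
    num_matching_queries=4,
    group_cfg=dict(dynamic=True, num_dn_queries=100))
attn_mask = gen.generate_dn_mask(max_num_target=2, num_groups=2, device='cpu')
print(attn_mask.shape)   # torch.Size([12, 12])
print(attn_mask.int())   # 1 = cannot attend, 0 = can attend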
ERD
ERD-main/mmdet/models/layers/transformer/utils.py
# Copyright (c) OpenMMLab. All rights reserved. import math import warnings from typing import Optional, Sequence, Tuple, Union import torch import torch.nn.functional as F from mmcv.cnn import (Linear, build_activation_layer, build_conv_layer, build_norm_layer) from mmcv.cnn.bricks.drop import Dropout from mmengine.model import BaseModule, ModuleList from mmengine.utils import to_2tuple from torch import Tensor, nn from mmdet.registry import MODELS from mmdet.utils import OptConfigType, OptMultiConfig def nlc_to_nchw(x: Tensor, hw_shape: Sequence[int]) -> Tensor: """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. Args: x (Tensor): The input tensor of shape [N, L, C] before conversion. hw_shape (Sequence[int]): The height and width of output feature map. Returns: Tensor: The output tensor of shape [N, C, H, W] after conversion. """ H, W = hw_shape assert len(x.shape) == 3 B, L, C = x.shape assert L == H * W, 'The seq_len does not match H, W' return x.transpose(1, 2).reshape(B, C, H, W).contiguous() def nchw_to_nlc(x): """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. Args: x (Tensor): The input tensor of shape [N, C, H, W] before conversion. Returns: Tensor: The output tensor of shape [N, L, C] after conversion. """ assert len(x.shape) == 4 return x.flatten(2).transpose(1, 2).contiguous() def coordinate_to_encoding(coord_tensor: Tensor, num_feats: int = 128, temperature: int = 10000, scale: float = 2 * math.pi): """Convert coordinate tensor to positional encoding. Args: coord_tensor (Tensor): Coordinate tensor to be converted to positional encoding. With the last dimension as 2 or 4. num_feats (int, optional): The feature dimension for each position along x-axis or y-axis. Note the final returned dimension for each position is 2 times of this value. Defaults to 128. temperature (int, optional): The temperature used for scaling the position embedding. Defaults to 10000. scale (float, optional): A scale factor that scales the position embedding. The scale will be used only when `normalize` is True. Defaults to 2*pi. Returns: Tensor: Returned encoded positional tensor. """ dim_t = torch.arange( num_feats, dtype=torch.float32, device=coord_tensor.device) dim_t = temperature**(2 * (dim_t // 2) / num_feats) x_embed = coord_tensor[..., 0] * scale y_embed = coord_tensor[..., 1] * scale pos_x = x_embed[..., None] / dim_t pos_y = y_embed[..., None] / dim_t pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(2) pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(2) if coord_tensor.size(-1) == 2: pos = torch.cat((pos_y, pos_x), dim=-1) elif coord_tensor.size(-1) == 4: w_embed = coord_tensor[..., 2] * scale pos_w = w_embed[..., None] / dim_t pos_w = torch.stack((pos_w[..., 0::2].sin(), pos_w[..., 1::2].cos()), dim=-1).flatten(2) h_embed = coord_tensor[..., 3] * scale pos_h = h_embed[..., None] / dim_t pos_h = torch.stack((pos_h[..., 0::2].sin(), pos_h[..., 1::2].cos()), dim=-1).flatten(2) pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=-1) else: raise ValueError('Unknown pos_tensor shape(-1):{}'.format( coord_tensor.size(-1))) return pos def inverse_sigmoid(x: Tensor, eps: float = 1e-5) -> Tensor: """Inverse function of sigmoid. Args: x (Tensor): The tensor to do the inverse. eps (float): EPS avoid numerical overflow. Defaults 1e-5. Returns: Tensor: The x has passed the inverse function of sigmoid, has the same shape with input. 
""" x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) class AdaptivePadding(nn.Module): """Applies padding to input (if needed) so that input can get fully covered by filter you specified. It support two modes "same" and "corner". The "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around input. The "corner" mode would pad zero to bottom right. Args: kernel_size (int | tuple): Size of the kernel: stride (int | tuple): Stride of the filter. Default: 1: dilation (int | tuple): Spacing between kernel elements. Default: 1 padding (str): Support "same" and "corner", "corner" mode would pad zero to bottom right, and "same" mode would pad zero around input. Default: "corner". Example: >>> kernel_size = 16 >>> stride = 16 >>> dilation = 1 >>> input = torch.rand(1, 1, 15, 17) >>> adap_pad = AdaptivePadding( >>> kernel_size=kernel_size, >>> stride=stride, >>> dilation=dilation, >>> padding="corner") >>> out = adap_pad(input) >>> assert (out.shape[2], out.shape[3]) == (16, 32) >>> input = torch.rand(1, 1, 16, 17) >>> out = adap_pad(input) >>> assert (out.shape[2], out.shape[3]) == (16, 32) """ def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): super(AdaptivePadding, self).__init__() assert padding in ('same', 'corner') kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) padding = to_2tuple(padding) dilation = to_2tuple(dilation) self.padding = padding self.kernel_size = kernel_size self.stride = stride self.dilation = dilation def get_pad_shape(self, input_shape): input_h, input_w = input_shape kernel_h, kernel_w = self.kernel_size stride_h, stride_w = self.stride output_h = math.ceil(input_h / stride_h) output_w = math.ceil(input_w / stride_w) pad_h = max((output_h - 1) * stride_h + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) pad_w = max((output_w - 1) * stride_w + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) return pad_h, pad_w def forward(self, x): pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) if pad_h > 0 or pad_w > 0: if self.padding == 'corner': x = F.pad(x, [0, pad_w, 0, pad_h]) elif self.padding == 'same': x = F.pad(x, [ pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2 ]) return x class PatchEmbed(BaseModule): """Image to Patch Embedding. We use a conv layer to implement PatchEmbed. Args: in_channels (int): The num of input channels. Default: 3 embed_dims (int): The dimensions of embedding. Default: 768 conv_type (str): The config dict for embedding conv layer type selection. Default: "Conv2d. kernel_size (int): The kernel_size of embedding conv. Default: 16. stride (int): The slide stride of embedding conv. Default: None (Would be set as `kernel_size`). padding (int | tuple | string ): The padding length of embedding conv. When it is a string, it means the mode of adaptive padding, support "same" and "corner" now. Default: "corner". dilation (int): The dilation rate of embedding conv. Default: 1. bias (bool): Bias of embed conv. Default: True. norm_cfg (dict, optional): Config dict for normalization layer. Default: None. input_size (int | tuple | None): The size of input, which will be used to calculate the out size. Only work when `dynamic_size` is False. Default: None. init_cfg (`mmengine.ConfigDict`, optional): The Config for initialization. Default: None. 
""" def __init__(self, in_channels: int = 3, embed_dims: int = 768, conv_type: str = 'Conv2d', kernel_size: int = 16, stride: int = 16, padding: Union[int, tuple, str] = 'corner', dilation: int = 1, bias: bool = True, norm_cfg: OptConfigType = None, input_size: Union[int, tuple] = None, init_cfg: OptConfigType = None) -> None: super(PatchEmbed, self).__init__(init_cfg=init_cfg) self.embed_dims = embed_dims if stride is None: stride = kernel_size kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) dilation = to_2tuple(dilation) if isinstance(padding, str): self.adap_padding = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) # disable the padding of conv padding = 0 else: self.adap_padding = None padding = to_2tuple(padding) self.projection = build_conv_layer( dict(type=conv_type), in_channels=in_channels, out_channels=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) if norm_cfg is not None: self.norm = build_norm_layer(norm_cfg, embed_dims)[1] else: self.norm = None if input_size: input_size = to_2tuple(input_size) # `init_out_size` would be used outside to # calculate the num_patches # when `use_abs_pos_embed` outside self.init_input_size = input_size if self.adap_padding: pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) input_h, input_w = input_size input_h = input_h + pad_h input_w = input_w + pad_w input_size = (input_h, input_w) # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html h_out = (input_size[0] + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // stride[0] + 1 w_out = (input_size[1] + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) // stride[1] + 1 self.init_out_size = (h_out, w_out) else: self.init_input_size = None self.init_out_size = None def forward(self, x: Tensor) -> Tuple[Tensor, Tuple[int]]: """ Args: x (Tensor): Has shape (B, C, H, W). In most case, C is 3. Returns: tuple: Contains merged results and its spatial shape. - x (Tensor): Has shape (B, out_h * out_w, embed_dims) - out_size (tuple[int]): Spatial shape of x, arrange as (out_h, out_w). """ if self.adap_padding: x = self.adap_padding(x) x = self.projection(x) out_size = (x.shape[2], x.shape[3]) x = x.flatten(2).transpose(1, 2) if self.norm is not None: x = self.norm(x) return x, out_size class PatchMerging(BaseModule): """Merge patch feature map. This layer groups feature map by kernel_size, and applies norm and linear layers to the grouped feature map. Our implementation uses `nn.Unfold` to merge patch, which is about 25% faster than original implementation. Instead, we need to modify pretrained models for compatibility. Args: in_channels (int): The num of input channels. to gets fully covered by filter and stride you specified.. Default: True. out_channels (int): The num of output channels. kernel_size (int | tuple, optional): the kernel size in the unfold layer. Defaults to 2. stride (int | tuple, optional): the stride of the sliding blocks in the unfold layer. Default: None. (Would be set as `kernel_size`) padding (int | tuple | string ): The padding length of embedding conv. When it is a string, it means the mode of adaptive padding, support "same" and "corner" now. Default: "corner". dilation (int | tuple, optional): dilation parameter in the unfold layer. Default: 1. bias (bool, optional): Whether to add bias in linear layer or not. Defaults: False. norm_cfg (dict, optional): Config dict for normalization layer. Default: dict(type='LN'). 
init_cfg (dict, optional): The extra config for initialization. Default: None. """ def __init__(self, in_channels: int, out_channels: int, kernel_size: Optional[Union[int, tuple]] = 2, stride: Optional[Union[int, tuple]] = None, padding: Union[int, tuple, str] = 'corner', dilation: Optional[Union[int, tuple]] = 1, bias: Optional[bool] = False, norm_cfg: OptConfigType = dict(type='LN'), init_cfg: OptConfigType = None) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.out_channels = out_channels if stride: stride = stride else: stride = kernel_size kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) dilation = to_2tuple(dilation) if isinstance(padding, str): self.adap_padding = AdaptivePadding( kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) # disable the padding of unfold padding = 0 else: self.adap_padding = None padding = to_2tuple(padding) self.sampler = nn.Unfold( kernel_size=kernel_size, dilation=dilation, padding=padding, stride=stride) sample_dim = kernel_size[0] * kernel_size[1] * in_channels if norm_cfg is not None: self.norm = build_norm_layer(norm_cfg, sample_dim)[1] else: self.norm = None self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) def forward(self, x: Tensor, input_size: Tuple[int]) -> Tuple[Tensor, Tuple[int]]: """ Args: x (Tensor): Has shape (B, H*W, C_in). input_size (tuple[int]): The spatial shape of x, arrange as (H, W). Default: None. Returns: tuple: Contains merged results and its spatial shape. - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) - out_size (tuple[int]): Spatial shape of x, arrange as (Merged_H, Merged_W). """ B, L, C = x.shape assert isinstance(input_size, Sequence), f'Expect ' \ f'input_size is ' \ f'`Sequence` ' \ f'but get {input_size}' H, W = input_size assert L == H * W, 'input feature has wrong size' x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W # Use nn.Unfold to merge patch. About 25% faster than original method, # but need to modify pretrained model for compatibility if self.adap_padding: x = self.adap_padding(x) H, W = x.shape[-2:] x = self.sampler(x) # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * (self.sampler.kernel_size[0] - 1) - 1) // self.sampler.stride[0] + 1 out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * (self.sampler.kernel_size[1] - 1) - 1) // self.sampler.stride[1] + 1 output_size = (out_h, out_w) x = x.transpose(1, 2) # B, H/2*W/2, 4*C x = self.norm(x) if self.norm else x x = self.reduction(x) return x, output_size class ConditionalAttention(BaseModule): """A wrapper of conditional attention, dropout and residual connection. Args: embed_dims (int): The embedding dimension. num_heads (int): Parallel attention heads. attn_drop (float): A Dropout layer on attn_output_weights. Default: 0.0. proj_drop: A Dropout layer after `nn.MultiheadAttention`. Default: 0.0. cross_attn (bool): Whether the attention module is for cross attention. Default: False keep_query_pos (bool): Whether to transform query_pos before cross attention. Default: False. batch_first (bool): When it is True, Key, Query and Value are shape of (batch, n, embed_dim), otherwise (n, batch, embed_dim). Default: True. init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. Default: None. 
""" def __init__(self, embed_dims: int, num_heads: int, attn_drop: float = 0., proj_drop: float = 0., cross_attn: bool = False, keep_query_pos: bool = False, batch_first: bool = True, init_cfg: OptMultiConfig = None): super().__init__(init_cfg=init_cfg) assert batch_first is True, 'Set `batch_first`\ to False is NOT supported in ConditionalAttention. \ First dimension of all DETRs in mmdet is `batch`, \ please set `batch_first` to True.' self.cross_attn = cross_attn self.keep_query_pos = keep_query_pos self.embed_dims = embed_dims self.num_heads = num_heads self.attn_drop = Dropout(attn_drop) self.proj_drop = Dropout(proj_drop) self._init_layers() def _init_layers(self): """Initialize layers for qkv projection.""" embed_dims = self.embed_dims self.qcontent_proj = Linear(embed_dims, embed_dims) self.qpos_proj = Linear(embed_dims, embed_dims) self.kcontent_proj = Linear(embed_dims, embed_dims) self.kpos_proj = Linear(embed_dims, embed_dims) self.v_proj = Linear(embed_dims, embed_dims) if self.cross_attn: self.qpos_sine_proj = Linear(embed_dims, embed_dims) self.out_proj = Linear(embed_dims, embed_dims) nn.init.constant_(self.out_proj.bias, 0.) def forward_attn(self, query: Tensor, key: Tensor, value: Tensor, attn_mask: Tensor = None, key_padding_mask: Tensor = None) -> Tuple[Tensor]: """Forward process for `ConditionalAttention`. Args: query (Tensor): The input query with shape [bs, num_queries, embed_dims]. key (Tensor): The key tensor with shape [bs, num_keys, embed_dims]. If None, the `query` will be used. Defaults to None. value (Tensor): The value tensor with same shape as `key`. Same in `nn.MultiheadAttention.forward`. Defaults to None. If None, the `key` will be used. attn_mask (Tensor): ByteTensor mask with shape [num_queries, num_keys]. Same in `nn.MultiheadAttention.forward`. Defaults to None. key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. Defaults to None. Returns: Tuple[Tensor]: Attention outputs of shape :math:`(N, L, E)`, where :math:`N` is the batch size, :math:`L` is the target sequence length , and :math:`E` is the embedding dimension `embed_dim`. Attention weights per head of shape :math:` (num_heads, L, S)`. where :math:`N` is batch size, :math:`L` is target sequence length, and :math:`S` is the source sequence length. 
""" assert key.size(1) == value.size(1), \ f'{"key, value must have the same sequence length"}' assert query.size(0) == key.size(0) == value.size(0), \ f'{"batch size must be equal for query, key, value"}' assert query.size(2) == key.size(2), \ f'{"q_dims, k_dims must be equal"}' assert value.size(2) == self.embed_dims, \ f'{"v_dims must be equal to embed_dims"}' bs, tgt_len, hidden_dims = query.size() _, src_len, _ = key.size() head_dims = hidden_dims // self.num_heads v_head_dims = self.embed_dims // self.num_heads assert head_dims * self.num_heads == hidden_dims, \ f'{"hidden_dims must be divisible by num_heads"}' scaling = float(head_dims)**-0.5 q = query * scaling k = key v = value if attn_mask is not None: assert attn_mask.dtype == torch.float32 or \ attn_mask.dtype == torch.float64 or \ attn_mask.dtype == torch.float16 or \ attn_mask.dtype == torch.uint8 or \ attn_mask.dtype == torch.bool, \ 'Only float, byte, and bool types are supported for \ attn_mask' if attn_mask.dtype == torch.uint8: warnings.warn('Byte tensor for attn_mask is deprecated.\ Use bool tensor instead.') attn_mask = attn_mask.to(torch.bool) if attn_mask.dim() == 2: attn_mask = attn_mask.unsqueeze(0) if list(attn_mask.size()) != [1, query.size(1), key.size(1)]: raise RuntimeError( 'The size of the 2D attn_mask is not correct.') elif attn_mask.dim() == 3: if list(attn_mask.size()) != [ bs * self.num_heads, query.size(1), key.size(1) ]: raise RuntimeError( 'The size of the 3D attn_mask is not correct.') else: raise RuntimeError( "attn_mask's dimension {} is not supported".format( attn_mask.dim())) # attn_mask's dim is 3 now. if key_padding_mask is not None and key_padding_mask.dtype == int: key_padding_mask = key_padding_mask.to(torch.bool) q = q.contiguous().view(bs, tgt_len, self.num_heads, head_dims).permute(0, 2, 1, 3).flatten(0, 1) if k is not None: k = k.contiguous().view(bs, src_len, self.num_heads, head_dims).permute(0, 2, 1, 3).flatten(0, 1) if v is not None: v = v.contiguous().view(bs, src_len, self.num_heads, v_head_dims).permute(0, 2, 1, 3).flatten(0, 1) if key_padding_mask is not None: assert key_padding_mask.size(0) == bs assert key_padding_mask.size(1) == src_len attn_output_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_output_weights.size()) == [ bs * self.num_heads, tgt_len, src_len ] if attn_mask is not None: if attn_mask.dtype == torch.bool: attn_output_weights.masked_fill_(attn_mask, float('-inf')) else: attn_output_weights += attn_mask if key_padding_mask is not None: attn_output_weights = attn_output_weights.view( bs, self.num_heads, tgt_len, src_len) attn_output_weights = attn_output_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf'), ) attn_output_weights = attn_output_weights.view( bs * self.num_heads, tgt_len, src_len) attn_output_weights = F.softmax( attn_output_weights - attn_output_weights.max(dim=-1, keepdim=True)[0], dim=-1) attn_output_weights = self.attn_drop(attn_output_weights) attn_output = torch.bmm(attn_output_weights, v) assert list( attn_output.size()) == [bs * self.num_heads, tgt_len, v_head_dims] attn_output = attn_output.view(bs, self.num_heads, tgt_len, v_head_dims).permute(0, 2, 1, 3).flatten(2) attn_output = self.out_proj(attn_output) # average attention weights over heads attn_output_weights = attn_output_weights.view(bs, self.num_heads, tgt_len, src_len) return attn_output, attn_output_weights.sum(dim=1) / self.num_heads def forward(self, query: Tensor, key: Tensor, query_pos: Tensor = None, ref_sine_embed: Tensor = None, key_pos: 
Tensor = None, attn_mask: Tensor = None, key_padding_mask: Tensor = None, is_first: bool = False) -> Tensor: """Forward function for `ConditionalAttention`. Args: query (Tensor): The input query with shape [bs, num_queries, embed_dims]. key (Tensor): The key tensor with shape [bs, num_keys, embed_dims]. If None, the `query` will be used. Defaults to None. query_pos (Tensor): The positional encoding for query in self attention, with the same shape as `x`. If not None, it will be added to `x` before forward function. Defaults to None. query_sine_embed (Tensor): The positional encoding for query in cross attention, with the same shape as `x`. If not None, it will be added to `x` before forward function. Defaults to None. key_pos (Tensor): The positional encoding for `key`, with the same shape as `key`. Defaults to None. If not None, it will be added to `key` before forward function. If None, and `query_pos` has the same shape as `key`, then `query_pos` will be used for `key_pos`. Defaults to None. attn_mask (Tensor): ByteTensor mask with shape [num_queries, num_keys]. Same in `nn.MultiheadAttention.forward`. Defaults to None. key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. Defaults to None. is_first (bool): A indicator to tell whether the current layer is the first layer of the decoder. Defaults to False. Returns: Tensor: forwarded results with shape [bs, num_queries, embed_dims]. """ if self.cross_attn: q_content = self.qcontent_proj(query) k_content = self.kcontent_proj(key) v = self.v_proj(key) bs, nq, c = q_content.size() _, hw, _ = k_content.size() k_pos = self.kpos_proj(key_pos) if is_first or self.keep_query_pos: q_pos = self.qpos_proj(query_pos) q = q_content + q_pos k = k_content + k_pos else: q = q_content k = k_content q = q.view(bs, nq, self.num_heads, c // self.num_heads) query_sine_embed = self.qpos_sine_proj(ref_sine_embed) query_sine_embed = query_sine_embed.view(bs, nq, self.num_heads, c // self.num_heads) q = torch.cat([q, query_sine_embed], dim=3).view(bs, nq, 2 * c) k = k.view(bs, hw, self.num_heads, c // self.num_heads) k_pos = k_pos.view(bs, hw, self.num_heads, c // self.num_heads) k = torch.cat([k, k_pos], dim=3).view(bs, hw, 2 * c) ca_output = self.forward_attn( query=q, key=k, value=v, attn_mask=attn_mask, key_padding_mask=key_padding_mask)[0] query = query + self.proj_drop(ca_output) else: q_content = self.qcontent_proj(query) q_pos = self.qpos_proj(query_pos) k_content = self.kcontent_proj(query) k_pos = self.kpos_proj(query_pos) v = self.v_proj(query) q = q_content if q_pos is None else q_content + q_pos k = k_content if k_pos is None else k_content + k_pos sa_output = self.forward_attn( query=q, key=k, value=v, attn_mask=attn_mask, key_padding_mask=key_padding_mask)[0] query = query + self.proj_drop(sa_output) return query class MLP(BaseModule): """Very simple multi-layer perceptron (also called FFN) with relu. Mostly used in DETR series detectors. Args: input_dim (int): Feature dim of the input tensor. hidden_dim (int): Feature dim of the hidden layer. output_dim (int): Feature dim of the output tensor. num_layers (int): Number of FFN layers. As the last layer of MLP only contains FFN (Linear). """ def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int) -> None: super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = ModuleList( Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x: Tensor) -> Tensor: """Forward function of MLP. 
Args: x (Tensor): The input feature, has shape (num_queries, bs, input_dim). Returns: Tensor: The output feature, has shape (num_queries, bs, output_dim). """ for i, layer in enumerate(self.layers): x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x @MODELS.register_module() class DynamicConv(BaseModule): """Implements Dynamic Convolution. This module generate parameters for each sample and use bmm to implement 1*1 convolution. Code is modified from the `official github repo <https://github.com/PeizeSun/ SparseR-CNN/blob/main/projects/SparseRCNN/sparsercnn/head.py#L258>`_ . Args: in_channels (int): The input feature channel. Defaults to 256. feat_channels (int): The inner feature channel. Defaults to 64. out_channels (int, optional): The output feature channel. When not specified, it will be set to `in_channels` by default input_feat_shape (int): The shape of input feature. Defaults to 7. with_proj (bool): Project two-dimentional feature to one-dimentional feature. Default to True. act_cfg (dict): The activation config for DynamicConv. norm_cfg (dict): Config dict for normalization layer. Default layer normalization. init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization. Default: None. """ def __init__(self, in_channels: int = 256, feat_channels: int = 64, out_channels: Optional[int] = None, input_feat_shape: int = 7, with_proj: bool = True, act_cfg: OptConfigType = dict(type='ReLU', inplace=True), norm_cfg: OptConfigType = dict(type='LN'), init_cfg: OptConfigType = None) -> None: super(DynamicConv, self).__init__(init_cfg) self.in_channels = in_channels self.feat_channels = feat_channels self.out_channels_raw = out_channels self.input_feat_shape = input_feat_shape self.with_proj = with_proj self.act_cfg = act_cfg self.norm_cfg = norm_cfg self.out_channels = out_channels if out_channels else in_channels self.num_params_in = self.in_channels * self.feat_channels self.num_params_out = self.out_channels * self.feat_channels self.dynamic_layer = nn.Linear( self.in_channels, self.num_params_in + self.num_params_out) self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1] self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1] self.activation = build_activation_layer(act_cfg) num_output = self.out_channels * input_feat_shape**2 if self.with_proj: self.fc_layer = nn.Linear(num_output, self.out_channels) self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1] def forward(self, param_feature: Tensor, input_feature: Tensor) -> Tensor: """Forward function for `DynamicConv`. Args: param_feature (Tensor): The feature can be used to generate the parameter, has shape (num_all_proposals, in_channels). input_feature (Tensor): Feature that interact with parameters, has shape (num_all_proposals, in_channels, H, W). Returns: Tensor: The output feature has shape (num_all_proposals, out_channels). 
""" input_feature = input_feature.flatten(2).permute(2, 0, 1) input_feature = input_feature.permute(1, 0, 2) parameters = self.dynamic_layer(param_feature) param_in = parameters[:, :self.num_params_in].view( -1, self.in_channels, self.feat_channels) param_out = parameters[:, -self.num_params_out:].view( -1, self.feat_channels, self.out_channels) # input_feature has shape (num_all_proposals, H*W, in_channels) # param_in has shape (num_all_proposals, in_channels, feat_channels) # feature has shape (num_all_proposals, H*W, feat_channels) features = torch.bmm(input_feature, param_in) features = self.norm_in(features) features = self.activation(features) # param_out has shape (batch_size, feat_channels, out_channels) features = torch.bmm(features, param_out) features = self.norm_out(features) features = self.activation(features) if self.with_proj: features = features.flatten(1) features = self.fc_layer(features) features = self.fc_norm(features) features = self.activation(features) return features
35,539
39.524515
79
py
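The positional-encoding helpers defined above (coordinate_to_encoding, inverse_sigmoid) can be exercised on their own. A minimal sketch, assuming the in-repo import path mmdet.models.layers.transformer; shapes and values are illustrative only:

import torch
from mmdet.models.layers.transformer import (coordinate_to_encoding,
                                              inverse_sigmoid)

# 2 images, 300 queries, normalized (cx, cy, w, h) boxes in (0, 1).
boxes = torch.rand(2, 300, 4).clamp(0.01, 0.99)

# Each of the 4 coordinates contributes `num_feats` sine/cosine channels,
# so the encoding has 4 * 128 = 512 channels per query here.
pos_embed = coordinate_to_encoding(boxes, num_feats=128)
assert pos_embed.shape == (2, 300, 512)

# inverse_sigmoid maps probabilities back to logits (clamped by `eps`);
# DETR-style heads use it to predict box residuals in logit space.
logits = inverse_sigmoid(boxes)
assert torch.allclose(logits.sigmoid(), boxes, atol=1e-4)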
ERD
ERD-main/mmdet/models/layers/transformer/dab_detr_layers.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List import torch import torch.nn as nn from mmcv.cnn import build_norm_layer from mmcv.cnn.bricks.transformer import FFN from mmengine.model import ModuleList from torch import Tensor from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer, DetrTransformerEncoder, DetrTransformerEncoderLayer) from .utils import (MLP, ConditionalAttention, coordinate_to_encoding, inverse_sigmoid) class DABDetrTransformerDecoderLayer(DetrTransformerDecoderLayer): """Implements decoder layer in DAB-DETR transformer.""" def _init_layers(self): """Initialize self-attention, cross-attention, FFN, normalization and others.""" self.self_attn = ConditionalAttention(**self.self_attn_cfg) self.cross_attn = ConditionalAttention(**self.cross_attn_cfg) self.embed_dims = self.self_attn.embed_dims self.ffn = FFN(**self.ffn_cfg) norms_list = [ build_norm_layer(self.norm_cfg, self.embed_dims)[1] for _ in range(3) ] self.norms = ModuleList(norms_list) self.keep_query_pos = self.cross_attn.keep_query_pos def forward(self, query: Tensor, key: Tensor, query_pos: Tensor, key_pos: Tensor, ref_sine_embed: Tensor = None, self_attn_masks: Tensor = None, cross_attn_masks: Tensor = None, key_padding_mask: Tensor = None, is_first: bool = False, **kwargs) -> Tensor: """ Args: query (Tensor): The input query with shape [bs, num_queries, dim]. key (Tensor): The key tensor with shape [bs, num_keys, dim]. query_pos (Tensor): The positional encoding for query in self attention, with the same shape as `x`. key_pos (Tensor): The positional encoding for `key`, with the same shape as `key`. ref_sine_embed (Tensor): The positional encoding for query in cross attention, with the same shape as `x`. Defaults to None. self_attn_masks (Tensor): ByteTensor mask with shape [num_queries, num_keys]. Same in `nn.MultiheadAttention.forward`. Defaults to None. cross_attn_masks (Tensor): ByteTensor mask with shape [num_queries, num_keys]. Same in `nn.MultiheadAttention.forward`. Defaults to None. key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. Defaults to None. is_first (bool): A indicator to tell whether the current layer is the first layer of the decoder. Defaults to False. Returns: Tensor: forwarded results with shape [bs, num_queries, dim]. """ query = self.self_attn( query=query, key=query, query_pos=query_pos, key_pos=query_pos, attn_mask=self_attn_masks, **kwargs) query = self.norms[0](query) query = self.cross_attn( query=query, key=key, query_pos=query_pos, key_pos=key_pos, ref_sine_embed=ref_sine_embed, attn_mask=cross_attn_masks, key_padding_mask=key_padding_mask, is_first=is_first, **kwargs) query = self.norms[1](query) query = self.ffn(query) query = self.norms[2](query) return query class DABDetrTransformerDecoder(DetrTransformerDecoder): """Decoder of DAB-DETR. Args: query_dim (int): The last dimension of query pos, 4 for anchor format, 2 for point format. Defaults to 4. query_scale_type (str): Type of transformation applied to content query. Defaults to `cond_elewise`. with_modulated_hw_attn (bool): Whether to inject h&w info during cross conditional attention. Defaults to True. 
""" def __init__(self, *args, query_dim: int = 4, query_scale_type: str = 'cond_elewise', with_modulated_hw_attn: bool = True, **kwargs): self.query_dim = query_dim self.query_scale_type = query_scale_type self.with_modulated_hw_attn = with_modulated_hw_attn super().__init__(*args, **kwargs) def _init_layers(self): """Initialize decoder layers and other layers.""" assert self.query_dim in [2, 4], \ f'{"dab-detr only supports anchor prior or reference point prior"}' assert self.query_scale_type in [ 'cond_elewise', 'cond_scalar', 'fix_elewise' ] self.layers = ModuleList([ DABDetrTransformerDecoderLayer(**self.layer_cfg) for _ in range(self.num_layers) ]) embed_dims = self.layers[0].embed_dims self.embed_dims = embed_dims self.post_norm = build_norm_layer(self.post_norm_cfg, embed_dims)[1] if self.query_scale_type == 'cond_elewise': self.query_scale = MLP(embed_dims, embed_dims, embed_dims, 2) elif self.query_scale_type == 'cond_scalar': self.query_scale = MLP(embed_dims, embed_dims, 1, 2) elif self.query_scale_type == 'fix_elewise': self.query_scale = nn.Embedding(self.num_layers, embed_dims) else: raise NotImplementedError('Unknown query_scale_type: {}'.format( self.query_scale_type)) self.ref_point_head = MLP(self.query_dim // 2 * embed_dims, embed_dims, embed_dims, 2) if self.with_modulated_hw_attn and self.query_dim == 4: self.ref_anchor_head = MLP(embed_dims, embed_dims, 2, 2) self.keep_query_pos = self.layers[0].keep_query_pos if not self.keep_query_pos: for layer_id in range(self.num_layers - 1): self.layers[layer_id + 1].cross_attn.qpos_proj = None def forward(self, query: Tensor, key: Tensor, query_pos: Tensor, key_pos: Tensor, reg_branches: nn.Module, key_padding_mask: Tensor = None, **kwargs) -> List[Tensor]: """Forward function of decoder. Args: query (Tensor): The input query with shape (bs, num_queries, dim). key (Tensor): The input key with shape (bs, num_keys, dim). query_pos (Tensor): The positional encoding for `query`, with the same shape as `query`. key_pos (Tensor): The positional encoding for `key`, with the same shape as `key`. reg_branches (nn.Module): The regression branch for dynamically updating references in each layer. key_padding_mask (Tensor): ByteTensor with shape (bs, num_keys). Defaults to `None`. Returns: List[Tensor]: forwarded results with shape (num_decoder_layers, bs, num_queries, dim) if `return_intermediate` is True, otherwise with shape (1, bs, num_queries, dim). references with shape (num_decoder_layers, bs, num_queries, 2/4). 
""" output = query unsigmoid_references = query_pos reference_points = unsigmoid_references.sigmoid() intermediate_reference_points = [reference_points] intermediate = [] for layer_id, layer in enumerate(self.layers): obj_center = reference_points[..., :self.query_dim] ref_sine_embed = coordinate_to_encoding( coord_tensor=obj_center, num_feats=self.embed_dims // 2) query_pos = self.ref_point_head( ref_sine_embed) # [bs, nq, 2c] -> [bs, nq, c] # For the first decoder layer, do not apply transformation if self.query_scale_type != 'fix_elewise': if layer_id == 0: pos_transformation = 1 else: pos_transformation = self.query_scale(output) else: pos_transformation = self.query_scale.weight[layer_id] # apply transformation ref_sine_embed = ref_sine_embed[ ..., :self.embed_dims] * pos_transformation # modulated height and weight attention if self.with_modulated_hw_attn: assert obj_center.size(-1) == 4 ref_hw = self.ref_anchor_head(output).sigmoid() ref_sine_embed[..., self.embed_dims // 2:] *= \ (ref_hw[..., 0] / obj_center[..., 2]).unsqueeze(-1) ref_sine_embed[..., : self.embed_dims // 2] *= \ (ref_hw[..., 1] / obj_center[..., 3]).unsqueeze(-1) output = layer( output, key, query_pos=query_pos, ref_sine_embed=ref_sine_embed, key_pos=key_pos, key_padding_mask=key_padding_mask, is_first=(layer_id == 0), **kwargs) # iter update tmp_reg_preds = reg_branches(output) tmp_reg_preds[..., :self.query_dim] += inverse_sigmoid( reference_points) new_reference_points = tmp_reg_preds[ ..., :self.query_dim].sigmoid() if layer_id != self.num_layers - 1: intermediate_reference_points.append(new_reference_points) reference_points = new_reference_points.detach() if self.return_intermediate: intermediate.append(self.post_norm(output)) output = self.post_norm(output) if self.return_intermediate: return [ torch.stack(intermediate), torch.stack(intermediate_reference_points), ] else: return [ output.unsqueeze(0), torch.stack(intermediate_reference_points) ] class DABDetrTransformerEncoder(DetrTransformerEncoder): """Encoder of DAB-DETR.""" def _init_layers(self): """Initialize encoder layers.""" self.layers = ModuleList([ DetrTransformerEncoderLayer(**self.layer_cfg) for _ in range(self.num_layers) ]) embed_dims = self.layers[0].embed_dims self.embed_dims = embed_dims self.query_scale = MLP(embed_dims, embed_dims, embed_dims, 2) def forward(self, query: Tensor, query_pos: Tensor, key_padding_mask: Tensor, **kwargs): """Forward function of encoder. Args: query (Tensor): Input queries of encoder, has shape (bs, num_queries, dim). query_pos (Tensor): The positional embeddings of the queries, has shape (bs, num_feat_points, dim). key_padding_mask (Tensor): ByteTensor, the key padding mask of the queries, has shape (bs, num_feat_points). Returns: Tensor: With shape (num_queries, bs, dim). """ for layer in self.layers: pos_scales = self.query_scale(query) query = layer( query, query_pos=query_pos * pos_scales, key_padding_mask=key_padding_mask, **kwargs) return query
11,683
38.076923
79
py
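A construction-and-forward sketch for the DABDetrTransformerDecoder above. The layer config and tensor shapes are hypothetical stand-ins for what the DAB-DETR detector would normally pass in (encoder memory, learned anchor logits, and a shared regression branch):

import torch
from mmdet.models.layers.transformer import MLP, DABDetrTransformerDecoder

layer_cfg = dict(
    self_attn_cfg=dict(embed_dims=256, num_heads=8),
    cross_attn_cfg=dict(embed_dims=256, num_heads=8, cross_attn=True),
    ffn_cfg=dict(embed_dims=256, feedforward_channels=2048))
decoder = DABDetrTransformerDecoder(
    num_layers=2,  # 6 in DAB-DETR; 2 keeps the sketch cheap
    layer_cfg=layer_cfg,
    query_dim=4,
    with_modulated_hw_attn=True,
    return_intermediate=True)

bs, num_queries, num_tokens, dims = 1, 300, 850, 256
query = torch.zeros(bs, num_queries, dims)        # decoder content queries
memory = torch.rand(bs, num_tokens, dims)         # encoder output
anchor_logits = torch.randn(bs, num_queries, 4)   # unsigmoided (cx, cy, w, h)
memory_pos = torch.rand(bs, num_tokens, dims)     # encoder positional encoding
reg_branch = MLP(dims, dims, 4, 3)                # shared box refinement head

inter_states, references = decoder(
    query=query,
    key=memory,
    query_pos=anchor_logits,
    key_pos=memory_pos,
    reg_branches=reg_branch)
# inter_states: per-layer decoder embeddings; references: per-layer anchors.
assert inter_states.shape == (2, bs, num_queries, dims)
assert references.shape == (2, bs, num_queries, 4)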
ERD
ERD-main/mmdet/models/layers/transformer/deformable_detr_layers.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple, Union import torch from mmcv.cnn import build_norm_layer from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention from mmcv.ops import MultiScaleDeformableAttention from mmengine.model import ModuleList from torch import Tensor, nn from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer, DetrTransformerEncoder, DetrTransformerEncoderLayer) from .utils import inverse_sigmoid class DeformableDetrTransformerEncoder(DetrTransformerEncoder): """Transformer encoder of Deformable DETR.""" def _init_layers(self) -> None: """Initialize encoder layers.""" self.layers = ModuleList([ DeformableDetrTransformerEncoderLayer(**self.layer_cfg) for _ in range(self.num_layers) ]) self.embed_dims = self.layers[0].embed_dims def forward(self, query: Tensor, query_pos: Tensor, key_padding_mask: Tensor, spatial_shapes: Tensor, level_start_index: Tensor, valid_ratios: Tensor, **kwargs) -> Tensor: """Forward function of Transformer encoder. Args: query (Tensor): The input query, has shape (bs, num_queries, dim). query_pos (Tensor): The positional encoding for query, has shape (bs, num_queries, dim). key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` input. ByteTensor, has shape (bs, num_queries). spatial_shapes (Tensor): Spatial shapes of features in all levels, has shape (num_levels, 2), last dimension represents (h, w). level_start_index (Tensor): The start index of each level. A tensor has shape (num_levels, ) and can be represented as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. valid_ratios (Tensor): The ratios of the valid width and the valid height relative to the width and the height of features in all levels, has shape (bs, num_levels, 2). Returns: Tensor: Output queries of Transformer encoder, which is also called 'encoder output embeddings' or 'memory', has shape (bs, num_queries, dim) """ reference_points = self.get_encoder_reference_points( spatial_shapes, valid_ratios, device=query.device) for layer in self.layers: query = layer( query=query, query_pos=query_pos, key_padding_mask=key_padding_mask, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, reference_points=reference_points, **kwargs) return query @staticmethod def get_encoder_reference_points( spatial_shapes: Tensor, valid_ratios: Tensor, device: Union[torch.device, str]) -> Tensor: """Get the reference points used in encoder. Args: spatial_shapes (Tensor): Spatial shapes of features in all levels, has shape (num_levels, 2), last dimension represents (h, w). valid_ratios (Tensor): The ratios of the valid width and the valid height relative to the width and the height of features in all levels, has shape (bs, num_levels, 2). device (obj:`device` or str): The device acquired by the `reference_points`. Returns: Tensor: Reference points used in decoder, has shape (bs, length, num_levels, 2). 
""" reference_points_list = [] for lvl, (H, W) in enumerate(spatial_shapes): ref_y, ref_x = torch.meshgrid( torch.linspace( 0.5, H - 0.5, H, dtype=torch.float32, device=device), torch.linspace( 0.5, W - 0.5, W, dtype=torch.float32, device=device)) ref_y = ref_y.reshape(-1)[None] / ( valid_ratios[:, None, lvl, 1] * H) ref_x = ref_x.reshape(-1)[None] / ( valid_ratios[:, None, lvl, 0] * W) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) # [bs, sum(hw), num_level, 2] reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points class DeformableDetrTransformerDecoder(DetrTransformerDecoder): """Transformer Decoder of Deformable DETR.""" def _init_layers(self) -> None: """Initialize decoder layers.""" self.layers = ModuleList([ DeformableDetrTransformerDecoderLayer(**self.layer_cfg) for _ in range(self.num_layers) ]) self.embed_dims = self.layers[0].embed_dims if self.post_norm_cfg is not None: raise ValueError('There is not post_norm in ' f'{self._get_name()}') def forward(self, query: Tensor, query_pos: Tensor, value: Tensor, key_padding_mask: Tensor, reference_points: Tensor, spatial_shapes: Tensor, level_start_index: Tensor, valid_ratios: Tensor, reg_branches: Optional[nn.Module] = None, **kwargs) -> Tuple[Tensor]: """Forward function of Transformer decoder. Args: query (Tensor): The input queries, has shape (bs, num_queries, dim). query_pos (Tensor): The input positional query, has shape (bs, num_queries, dim). It will be added to `query` before forward function. value (Tensor): The input values, has shape (bs, num_value, dim). key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn` input. ByteTensor, has shape (bs, num_value). reference_points (Tensor): The initial reference, has shape (bs, num_queries, 4) with the last dimension arranged as (cx, cy, w, h) when `as_two_stage` is `True`, otherwise has shape (bs, num_queries, 2) with the last dimension arranged as (cx, cy). spatial_shapes (Tensor): Spatial shapes of features in all levels, has shape (num_levels, 2), last dimension represents (h, w). level_start_index (Tensor): The start index of each level. A tensor has shape (num_levels, ) and can be represented as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. valid_ratios (Tensor): The ratios of the valid width and the valid height relative to the width and the height of features in all levels, has shape (bs, num_levels, 2). reg_branches: (obj:`nn.ModuleList`, optional): Used for refining the regression results. Only would be passed when `with_box_refine` is `True`, otherwise would be `None`. Returns: tuple[Tensor]: Outputs of Deformable Transformer Decoder. - output (Tensor): Output embeddings of the last decoder, has shape (num_queries, bs, embed_dims) when `return_intermediate` is `False`. Otherwise, Intermediate output embeddings of all decoder layers, has shape (num_decoder_layers, num_queries, bs, embed_dims). - reference_points (Tensor): The reference of the last decoder layer, has shape (bs, num_queries, 4) when `return_intermediate` is `False`. Otherwise, Intermediate references of all decoder layers, has shape (num_decoder_layers, bs, num_queries, 4). 
The coordinates are arranged as (cx, cy, w, h) """ output = query intermediate = [] intermediate_reference_points = [] for layer_id, layer in enumerate(self.layers): if reference_points.shape[-1] == 4: reference_points_input = \ reference_points[:, :, None] * \ torch.cat([valid_ratios, valid_ratios], -1)[:, None] else: assert reference_points.shape[-1] == 2 reference_points_input = \ reference_points[:, :, None] * \ valid_ratios[:, None] output = layer( output, query_pos=query_pos, value=value, key_padding_mask=key_padding_mask, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, reference_points=reference_points_input, **kwargs) if reg_branches is not None: tmp_reg_preds = reg_branches[layer_id](output) if reference_points.shape[-1] == 4: new_reference_points = tmp_reg_preds + inverse_sigmoid( reference_points) new_reference_points = new_reference_points.sigmoid() else: assert reference_points.shape[-1] == 2 new_reference_points = tmp_reg_preds new_reference_points[..., :2] = tmp_reg_preds[ ..., :2] + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() reference_points = new_reference_points.detach() if self.return_intermediate: intermediate.append(output) intermediate_reference_points.append(reference_points) if self.return_intermediate: return torch.stack(intermediate), torch.stack( intermediate_reference_points) return output, reference_points class DeformableDetrTransformerEncoderLayer(DetrTransformerEncoderLayer): """Encoder layer of Deformable DETR.""" def _init_layers(self) -> None: """Initialize self_attn, ffn, and norms.""" self.self_attn = MultiScaleDeformableAttention(**self.self_attn_cfg) self.embed_dims = self.self_attn.embed_dims self.ffn = FFN(**self.ffn_cfg) norms_list = [ build_norm_layer(self.norm_cfg, self.embed_dims)[1] for _ in range(2) ] self.norms = ModuleList(norms_list) class DeformableDetrTransformerDecoderLayer(DetrTransformerDecoderLayer): """Decoder layer of Deformable DETR.""" def _init_layers(self) -> None: """Initialize self_attn, cross-attn, ffn, and norms.""" self.self_attn = MultiheadAttention(**self.self_attn_cfg) self.cross_attn = MultiScaleDeformableAttention(**self.cross_attn_cfg) self.embed_dims = self.self_attn.embed_dims self.ffn = FFN(**self.ffn_cfg) norms_list = [ build_norm_layer(self.norm_cfg, self.embed_dims)[1] for _ in range(3) ] self.norms = ModuleList(norms_list)
11,274
43.920319
79
py
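The normalized per-level reference points that the deformable encoder builds internally can be inspected through the static helper above; a small sketch with made-up feature-map sizes and fully valid (unpadded) ratios:

import torch
from mmdet.models.layers.transformer import DeformableDetrTransformerEncoder

# Two feature levels for a batch of 2 images: 32x32 and 16x16 tokens.
spatial_shapes = torch.tensor([[32, 32], [16, 16]])
# valid_ratios of 1.0 everywhere, i.e. no padding in either image.
valid_ratios = torch.ones(2, 2, 2)

ref_points = DeformableDetrTransformerEncoder.get_encoder_reference_points(
    spatial_shapes, valid_ratios, device='cpu')
# One normalized (x, y) reference per token, repeated for every level:
# (bs, 32*32 + 16*16, num_levels, 2)
assert ref_points.shape == (2, 32 * 32 + 16 * 16, 2, 2)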
ERD
ERD-main/mmdet/models/layers/transformer/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .conditional_detr_layers import (ConditionalDetrTransformerDecoder, ConditionalDetrTransformerDecoderLayer) from .dab_detr_layers import (DABDetrTransformerDecoder, DABDetrTransformerDecoderLayer, DABDetrTransformerEncoder) from .deformable_detr_layers import (DeformableDetrTransformerDecoder, DeformableDetrTransformerDecoderLayer, DeformableDetrTransformerEncoder, DeformableDetrTransformerEncoderLayer) from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer, DetrTransformerEncoder, DetrTransformerEncoderLayer) from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder from .mask2former_layers import (Mask2FormerTransformerDecoder, Mask2FormerTransformerDecoderLayer, Mask2FormerTransformerEncoder) from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv, PatchEmbed, PatchMerging, coordinate_to_encoding, inverse_sigmoid, nchw_to_nlc, nlc_to_nchw) __all__ = [ 'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed', 'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP', 'DetrTransformerEncoder', 'DetrTransformerDecoder', 'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer', 'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder', 'DeformableDetrTransformerEncoderLayer', 'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding', 'ConditionalAttention', 'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder', 'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder', 'CdnQueryGenerator', 'Mask2FormerTransformerEncoder', 'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder' ]
2,127
58.111111
78
py
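Since the package __init__ above re-exports these building blocks, they can be imported directly from mmdet.models.layers.transformer. A quick sketch of two of the utilities with illustrative sizes (the proposal count and 7x7 RoI feature are assumptions):

import torch
from mmdet.models.layers.transformer import MLP, DynamicConv

# 3-layer box head: 256 -> 256 -> 256 -> 4, with ReLU between hidden layers.
box_head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
assert box_head(torch.rand(2, 100, 256)).shape == (2, 100, 4)

# Dynamic 1x1 "convolution" in the style of Sparse R-CNN heads: one
# parameter set per proposal, applied to that proposal's 7x7 RoI feature.
dynamic_conv = DynamicConv(
    in_channels=256, feat_channels=64, input_feat_shape=7, with_proj=True)
num_proposals = 100
proposal_feat = torch.rand(num_proposals, 256)    # parameter-generating input
roi_feat = torch.rand(num_proposals, 256, 7, 7)   # per-proposal RoI feature
assert dynamic_conv(proposal_feat, roi_feat).shape == (num_proposals, 256)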
ERD
ERD-main/mmdet/models/layers/transformer/conditional_detr_layers.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.cnn import build_norm_layer from mmcv.cnn.bricks.transformer import FFN from torch import Tensor from torch.nn import ModuleList from .detr_layers import DetrTransformerDecoder, DetrTransformerDecoderLayer from .utils import MLP, ConditionalAttention, coordinate_to_encoding class ConditionalDetrTransformerDecoder(DetrTransformerDecoder): """Decoder of Conditional DETR.""" def _init_layers(self) -> None: """Initialize decoder layers and other layers.""" self.layers = ModuleList([ ConditionalDetrTransformerDecoderLayer(**self.layer_cfg) for _ in range(self.num_layers) ]) self.embed_dims = self.layers[0].embed_dims self.post_norm = build_norm_layer(self.post_norm_cfg, self.embed_dims)[1] # conditional detr affline self.query_scale = MLP(self.embed_dims, self.embed_dims, self.embed_dims, 2) self.ref_point_head = MLP(self.embed_dims, self.embed_dims, 2, 2) # we have substitute 'qpos_proj' with 'qpos_sine_proj' except for # the first decoder layer), so 'qpos_proj' should be deleted # in other layers. for layer_id in range(self.num_layers - 1): self.layers[layer_id + 1].cross_attn.qpos_proj = None def forward(self, query: Tensor, key: Tensor = None, query_pos: Tensor = None, key_pos: Tensor = None, key_padding_mask: Tensor = None): """Forward function of decoder. Args: query (Tensor): The input query with shape (bs, num_queries, dim). key (Tensor): The input key with shape (bs, num_keys, dim) If `None`, the `query` will be used. Defaults to `None`. query_pos (Tensor): The positional encoding for `query`, with the same shape as `query`. If not `None`, it will be added to `query` before forward function. Defaults to `None`. key_pos (Tensor): The positional encoding for `key`, with the same shape as `key`. If not `None`, it will be added to `key` before forward function. If `None`, and `query_pos` has the same shape as `key`, then `query_pos` will be used as `key_pos`. Defaults to `None`. key_padding_mask (Tensor): ByteTensor with shape (bs, num_keys). Defaults to `None`. Returns: List[Tensor]: forwarded results with shape (num_decoder_layers, bs, num_queries, dim) if `return_intermediate` is True, otherwise with shape (1, bs, num_queries, dim). References with shape (bs, num_queries, 2). 
""" reference_unsigmoid = self.ref_point_head( query_pos) # [bs, num_queries, 2] reference = reference_unsigmoid.sigmoid() reference_xy = reference[..., :2] intermediate = [] for layer_id, layer in enumerate(self.layers): if layer_id == 0: pos_transformation = 1 else: pos_transformation = self.query_scale(query) # get sine embedding for the query reference ref_sine_embed = coordinate_to_encoding(coord_tensor=reference_xy) # apply transformation ref_sine_embed = ref_sine_embed * pos_transformation query = layer( query, key=key, query_pos=query_pos, key_pos=key_pos, key_padding_mask=key_padding_mask, ref_sine_embed=ref_sine_embed, is_first=(layer_id == 0)) if self.return_intermediate: intermediate.append(self.post_norm(query)) if self.return_intermediate: return torch.stack(intermediate), reference query = self.post_norm(query) return query.unsqueeze(0), reference class ConditionalDetrTransformerDecoderLayer(DetrTransformerDecoderLayer): """Implements decoder layer in Conditional DETR transformer.""" def _init_layers(self): """Initialize self-attention, cross-attention, FFN, and normalization.""" self.self_attn = ConditionalAttention(**self.self_attn_cfg) self.cross_attn = ConditionalAttention(**self.cross_attn_cfg) self.embed_dims = self.self_attn.embed_dims self.ffn = FFN(**self.ffn_cfg) norms_list = [ build_norm_layer(self.norm_cfg, self.embed_dims)[1] for _ in range(3) ] self.norms = ModuleList(norms_list) def forward(self, query: Tensor, key: Tensor = None, query_pos: Tensor = None, key_pos: Tensor = None, self_attn_masks: Tensor = None, cross_attn_masks: Tensor = None, key_padding_mask: Tensor = None, ref_sine_embed: Tensor = None, is_first: bool = False): """ Args: query (Tensor): The input query, has shape (bs, num_queries, dim) key (Tensor, optional): The input key, has shape (bs, num_keys, dim). If `None`, the `query` will be used. Defaults to `None`. query_pos (Tensor, optional): The positional encoding for `query`, has the same shape as `query`. If not `None`, it will be added to `query` before forward function. Defaults to `None`. ref_sine_embed (Tensor): The positional encoding for query in cross attention, with the same shape as `x`. Defaults to None. key_pos (Tensor, optional): The positional encoding for `key`, has the same shape as `key`. If not None, it will be added to `key` before forward function. If None, and `query_pos` has the same shape as `key`, then `query_pos` will be used for `key_pos`. Defaults to None. self_attn_masks (Tensor, optional): ByteTensor mask, has shape (num_queries, num_keys), Same in `nn.MultiheadAttention. forward`. Defaults to None. cross_attn_masks (Tensor, optional): ByteTensor mask, has shape (num_queries, num_keys), Same in `nn.MultiheadAttention. forward`. Defaults to None. key_padding_mask (Tensor, optional): ByteTensor, has shape (bs, num_keys). Defaults to None. is_first (bool): A indicator to tell whether the current layer is the first layer of the decoder. Defaults to False. Returns: Tensor: Forwarded results, has shape (bs, num_queries, dim). """ query = self.self_attn( query=query, key=query, query_pos=query_pos, key_pos=query_pos, attn_mask=self_attn_masks) query = self.norms[0](query) query = self.cross_attn( query=query, key=key, query_pos=query_pos, key_pos=key_pos, attn_mask=cross_attn_masks, key_padding_mask=key_padding_mask, ref_sine_embed=ref_sine_embed, is_first=is_first) query = self.norms[1](query) query = self.ffn(query) query = self.norms[2](query) return query
7,563
43.233918
78
py
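A parallel sketch for the Conditional DETR decoder above. Unlike the DAB-DETR variant, it derives a single 2-d reference point from the learned query embedding and returns that reference unchanged alongside the per-layer outputs; shapes below are illustrative:

import torch
from mmdet.models.layers.transformer import ConditionalDetrTransformerDecoder

layer_cfg = dict(
    self_attn_cfg=dict(embed_dims=256, num_heads=8),
    cross_attn_cfg=dict(embed_dims=256, num_heads=8, cross_attn=True),
    ffn_cfg=dict(embed_dims=256, feedforward_channels=2048))
decoder = ConditionalDetrTransformerDecoder(num_layers=2, layer_cfg=layer_cfg)

bs, num_queries, num_tokens, dims = 1, 100, 600, 256
query = torch.zeros(bs, num_queries, dims)      # decoder content queries
memory = torch.rand(bs, num_tokens, dims)       # encoder output
query_pos = torch.rand(bs, num_queries, dims)   # learned query embeddings
memory_pos = torch.rand(bs, num_tokens, dims)   # encoder positional encoding

inter_states, reference = decoder(
    query, key=memory, query_pos=query_pos, key_pos=memory_pos)
assert inter_states.shape == (2, bs, num_queries, dims)
assert reference.shape == (bs, num_queries, 2)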
ERD
ERD-main/mmdet/models/layers/transformer/mask2former_layers.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.cnn import build_norm_layer from mmengine.model import ModuleList from torch import Tensor from .deformable_detr_layers import DeformableDetrTransformerEncoder from .detr_layers import DetrTransformerDecoder, DetrTransformerDecoderLayer class Mask2FormerTransformerEncoder(DeformableDetrTransformerEncoder): """Encoder in PixelDecoder of Mask2Former.""" def forward(self, query: Tensor, query_pos: Tensor, key_padding_mask: Tensor, spatial_shapes: Tensor, level_start_index: Tensor, valid_ratios: Tensor, reference_points: Tensor, **kwargs) -> Tensor: """Forward function of Transformer encoder. Args: query (Tensor): The input query, has shape (bs, num_queries, dim). query_pos (Tensor): The positional encoding for query, has shape (bs, num_queries, dim). If not None, it will be added to the `query` before forward function. Defaults to None. key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` input. ByteTensor, has shape (bs, num_queries). spatial_shapes (Tensor): Spatial shapes of features in all levels, has shape (num_levels, 2), last dimension represents (h, w). level_start_index (Tensor): The start index of each level. A tensor has shape (num_levels, ) and can be represented as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. valid_ratios (Tensor): The ratios of the valid width and the valid height relative to the width and the height of features in all levels, has shape (bs, num_levels, 2). reference_points (Tensor): The initial reference, has shape (bs, num_queries, 2) with the last dimension arranged as (cx, cy). Returns: Tensor: Output queries of Transformer encoder, which is also called 'encoder output embeddings' or 'memory', has shape (bs, num_queries, dim) """ for layer in self.layers: query = layer( query=query, query_pos=query_pos, key_padding_mask=key_padding_mask, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, reference_points=reference_points, **kwargs) return query class Mask2FormerTransformerDecoder(DetrTransformerDecoder): """Decoder of Mask2Former.""" def _init_layers(self) -> None: """Initialize decoder layers.""" self.layers = ModuleList([ Mask2FormerTransformerDecoderLayer(**self.layer_cfg) for _ in range(self.num_layers) ]) self.embed_dims = self.layers[0].embed_dims self.post_norm = build_norm_layer(self.post_norm_cfg, self.embed_dims)[1] class Mask2FormerTransformerDecoderLayer(DetrTransformerDecoderLayer): """Implements decoder layer in Mask2Former transformer.""" def forward(self, query: Tensor, key: Tensor = None, value: Tensor = None, query_pos: Tensor = None, key_pos: Tensor = None, self_attn_mask: Tensor = None, cross_attn_mask: Tensor = None, key_padding_mask: Tensor = None, **kwargs) -> Tensor: """ Args: query (Tensor): The input query, has shape (bs, num_queries, dim). key (Tensor, optional): The input key, has shape (bs, num_keys, dim). If `None`, the `query` will be used. Defaults to `None`. value (Tensor, optional): The input value, has the same shape as `key`, as in `nn.MultiheadAttention.forward`. If `None`, the `key` will be used. Defaults to `None`. query_pos (Tensor, optional): The positional encoding for `query`, has the same shape as `query`. If not `None`, it will be added to `query` before forward function. Defaults to `None`. key_pos (Tensor, optional): The positional encoding for `key`, has the same shape as `key`. If not `None`, it will be added to `key` before forward function. 
If None, and `query_pos` has the same shape as `key`, then `query_pos` will be used for `key_pos`. Defaults to None. self_attn_mask (Tensor, optional): ByteTensor mask, has shape (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. Defaults to None. cross_attn_mask (Tensor, optional): ByteTensor mask, has shape (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. Defaults to None. key_padding_mask (Tensor, optional): The `key_padding_mask` of `self_attn` input. ByteTensor, has shape (bs, num_value). Defaults to None. Returns: Tensor: forwarded results, has shape (bs, num_queries, dim). """ query = self.cross_attn( query=query, key=key, value=value, query_pos=query_pos, key_pos=key_pos, attn_mask=cross_attn_mask, key_padding_mask=key_padding_mask, **kwargs) query = self.norms[0](query) query = self.self_attn( query=query, key=query, value=query, query_pos=query_pos, key_pos=query_pos, attn_mask=self_attn_mask, **kwargs) query = self.norms[1](query) query = self.ffn(query) query = self.norms[2](query) return query
5,960
42.830882
79
py
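The Mask2Former decoder layer above differs from the plain DETR layer mainly in ordering: masked cross-attention to the pixel features comes first, then self-attention among the queries. A small sketch using the layer's default 256-dim / 8-head configuration; passing cross_attn_mask=None simply means unmasked cross-attention here:

import torch
from mmdet.models.layers.transformer import Mask2FormerTransformerDecoderLayer

# Defaults: 256-dim embeddings, 8 heads, LN norms, 1024-dim FFN.
layer = Mask2FormerTransformerDecoderLayer()

bs, num_queries, num_pixels, dims = 1, 100, 1024, 256
query = torch.rand(bs, num_queries, dims)     # object queries
memory = torch.rand(bs, num_pixels, dims)     # flattened pixel features
query_pos = torch.rand(bs, num_queries, dims)
memory_pos = torch.rand(bs, num_pixels, dims)

# In the real head, cross_attn_mask comes from the previous layer's
# predicted masks (attend only inside the foreground regions).
out = layer(
    query=query,
    key=memory,
    value=memory,
    query_pos=query_pos,
    key_pos=memory_pos,
    cross_attn_mask=None)
assert out.shape == (bs, num_queries, dims)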
ERD
ERD-main/mmdet/models/layers/transformer/detr_layers.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Union import torch from mmcv.cnn import build_norm_layer from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention from mmengine import ConfigDict from mmengine.model import BaseModule, ModuleList from torch import Tensor from mmdet.utils import ConfigType, OptConfigType class DetrTransformerEncoder(BaseModule): """Encoder of DETR. Args: num_layers (int): Number of encoder layers. layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder layer. All the layers will share the same config. init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. Defaults to None. """ def __init__(self, num_layers: int, layer_cfg: ConfigType, init_cfg: OptConfigType = None) -> None: super().__init__(init_cfg=init_cfg) self.num_layers = num_layers self.layer_cfg = layer_cfg self._init_layers() def _init_layers(self) -> None: """Initialize encoder layers.""" self.layers = ModuleList([ DetrTransformerEncoderLayer(**self.layer_cfg) for _ in range(self.num_layers) ]) self.embed_dims = self.layers[0].embed_dims def forward(self, query: Tensor, query_pos: Tensor, key_padding_mask: Tensor, **kwargs) -> Tensor: """Forward function of encoder. Args: query (Tensor): Input queries of encoder, has shape (bs, num_queries, dim). query_pos (Tensor): The positional embeddings of the queries, has shape (bs, num_queries, dim). key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` input. ByteTensor, has shape (bs, num_queries). Returns: Tensor: Has shape (bs, num_queries, dim) if `batch_first` is `True`, otherwise (num_queries, bs, dim). """ for layer in self.layers: query = layer(query, query_pos, key_padding_mask, **kwargs) return query class DetrTransformerDecoder(BaseModule): """Decoder of DETR. Args: num_layers (int): Number of decoder layers. layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder layer. All the layers will share the same config. post_norm_cfg (:obj:`ConfigDict` or dict, optional): Config of the post normalization layer. Defaults to `LN`. return_intermediate (bool, optional): Whether to return outputs of intermediate layers. Defaults to `True`, init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. Defaults to None. """ def __init__(self, num_layers: int, layer_cfg: ConfigType, post_norm_cfg: OptConfigType = dict(type='LN'), return_intermediate: bool = True, init_cfg: Union[dict, ConfigDict] = None) -> None: super().__init__(init_cfg=init_cfg) self.layer_cfg = layer_cfg self.num_layers = num_layers self.post_norm_cfg = post_norm_cfg self.return_intermediate = return_intermediate self._init_layers() def _init_layers(self) -> None: """Initialize decoder layers.""" self.layers = ModuleList([ DetrTransformerDecoderLayer(**self.layer_cfg) for _ in range(self.num_layers) ]) self.embed_dims = self.layers[0].embed_dims self.post_norm = build_norm_layer(self.post_norm_cfg, self.embed_dims)[1] def forward(self, query: Tensor, key: Tensor, value: Tensor, query_pos: Tensor, key_pos: Tensor, key_padding_mask: Tensor, **kwargs) -> Tensor: """Forward function of decoder Args: query (Tensor): The input query, has shape (bs, num_queries, dim). key (Tensor): The input key, has shape (bs, num_keys, dim). value (Tensor): The input value with the same shape as `key`. query_pos (Tensor): The positional encoding for `query`, with the same shape as `query`. key_pos (Tensor): The positional encoding for `key`, with the same shape as `key`. 
key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn` input. ByteTensor, has shape (bs, num_value). Returns: Tensor: The forwarded results will have shape (num_decoder_layers, bs, num_queries, dim) if `return_intermediate` is `True` else (1, bs, num_queries, dim). """ intermediate = [] for layer in self.layers: query = layer( query, key=key, value=value, query_pos=query_pos, key_pos=key_pos, key_padding_mask=key_padding_mask, **kwargs) if self.return_intermediate: intermediate.append(self.post_norm(query)) query = self.post_norm(query) if self.return_intermediate: return torch.stack(intermediate) return query.unsqueeze(0) class DetrTransformerEncoderLayer(BaseModule): """Implements encoder layer in DETR transformer. Args: self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self attention. ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN. norm_cfg (:obj:`ConfigDict` or dict, optional): Config for normalization layers. All the layers will share the same config. Defaults to `LN`. init_cfg (:obj:`ConfigDict` or dict, optional): Config to control the initialization. Defaults to None. """ def __init__(self, self_attn_cfg: OptConfigType = dict( embed_dims=256, num_heads=8, dropout=0.0), ffn_cfg: OptConfigType = dict( embed_dims=256, feedforward_channels=1024, num_fcs=2, ffn_drop=0., act_cfg=dict(type='ReLU', inplace=True)), norm_cfg: OptConfigType = dict(type='LN'), init_cfg: OptConfigType = None) -> None: super().__init__(init_cfg=init_cfg) self.self_attn_cfg = self_attn_cfg if 'batch_first' not in self.self_attn_cfg: self.self_attn_cfg['batch_first'] = True else: assert self.self_attn_cfg['batch_first'] is True, 'First \ dimension of all DETRs in mmdet is `batch`, \ please set `batch_first` flag.' self.ffn_cfg = ffn_cfg self.norm_cfg = norm_cfg self._init_layers() def _init_layers(self) -> None: """Initialize self-attention, FFN, and normalization.""" self.self_attn = MultiheadAttention(**self.self_attn_cfg) self.embed_dims = self.self_attn.embed_dims self.ffn = FFN(**self.ffn_cfg) norms_list = [ build_norm_layer(self.norm_cfg, self.embed_dims)[1] for _ in range(2) ] self.norms = ModuleList(norms_list) def forward(self, query: Tensor, query_pos: Tensor, key_padding_mask: Tensor, **kwargs) -> Tensor: """Forward function of an encoder layer. Args: query (Tensor): The input query, has shape (bs, num_queries, dim). query_pos (Tensor): The positional encoding for query, with the same shape as `query`. key_padding_mask (Tensor): The `key_padding_mask` of `self_attn` input. ByteTensor. has shape (bs, num_queries). Returns: Tensor: forwarded results, has shape (bs, num_queries, dim). """ query = self.self_attn( query=query, key=query, value=query, query_pos=query_pos, key_pos=query_pos, key_padding_mask=key_padding_mask, **kwargs) query = self.norms[0](query) query = self.ffn(query) query = self.norms[1](query) return query class DetrTransformerDecoderLayer(BaseModule): """Implements decoder layer in DETR transformer. Args: self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self attention. cross_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for cross attention. ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN. norm_cfg (:obj:`ConfigDict` or dict, optional): Config for normalization layers. All the layers will share the same config. Defaults to `LN`. init_cfg (:obj:`ConfigDict` or dict, optional): Config to control the initialization. Defaults to None. 
""" def __init__(self, self_attn_cfg: OptConfigType = dict( embed_dims=256, num_heads=8, dropout=0.0, batch_first=True), cross_attn_cfg: OptConfigType = dict( embed_dims=256, num_heads=8, dropout=0.0, batch_first=True), ffn_cfg: OptConfigType = dict( embed_dims=256, feedforward_channels=1024, num_fcs=2, ffn_drop=0., act_cfg=dict(type='ReLU', inplace=True), ), norm_cfg: OptConfigType = dict(type='LN'), init_cfg: OptConfigType = None) -> None: super().__init__(init_cfg=init_cfg) self.self_attn_cfg = self_attn_cfg self.cross_attn_cfg = cross_attn_cfg if 'batch_first' not in self.self_attn_cfg: self.self_attn_cfg['batch_first'] = True else: assert self.self_attn_cfg['batch_first'] is True, 'First \ dimension of all DETRs in mmdet is `batch`, \ please set `batch_first` flag.' if 'batch_first' not in self.cross_attn_cfg: self.cross_attn_cfg['batch_first'] = True else: assert self.cross_attn_cfg['batch_first'] is True, 'First \ dimension of all DETRs in mmdet is `batch`, \ please set `batch_first` flag.' self.ffn_cfg = ffn_cfg self.norm_cfg = norm_cfg self._init_layers() def _init_layers(self) -> None: """Initialize self-attention, FFN, and normalization.""" self.self_attn = MultiheadAttention(**self.self_attn_cfg) self.cross_attn = MultiheadAttention(**self.cross_attn_cfg) self.embed_dims = self.self_attn.embed_dims self.ffn = FFN(**self.ffn_cfg) norms_list = [ build_norm_layer(self.norm_cfg, self.embed_dims)[1] for _ in range(3) ] self.norms = ModuleList(norms_list) def forward(self, query: Tensor, key: Tensor = None, value: Tensor = None, query_pos: Tensor = None, key_pos: Tensor = None, self_attn_mask: Tensor = None, cross_attn_mask: Tensor = None, key_padding_mask: Tensor = None, **kwargs) -> Tensor: """ Args: query (Tensor): The input query, has shape (bs, num_queries, dim). key (Tensor, optional): The input key, has shape (bs, num_keys, dim). If `None`, the `query` will be used. Defaults to `None`. value (Tensor, optional): The input value, has the same shape as `key`, as in `nn.MultiheadAttention.forward`. If `None`, the `key` will be used. Defaults to `None`. query_pos (Tensor, optional): The positional encoding for `query`, has the same shape as `query`. If not `None`, it will be added to `query` before forward function. Defaults to `None`. key_pos (Tensor, optional): The positional encoding for `key`, has the same shape as `key`. If not `None`, it will be added to `key` before forward function. If None, and `query_pos` has the same shape as `key`, then `query_pos` will be used for `key_pos`. Defaults to None. self_attn_mask (Tensor, optional): ByteTensor mask, has shape (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. Defaults to None. cross_attn_mask (Tensor, optional): ByteTensor mask, has shape (num_queries, num_keys), as in `nn.MultiheadAttention.forward`. Defaults to None. key_padding_mask (Tensor, optional): The `key_padding_mask` of `self_attn` input. ByteTensor, has shape (bs, num_value). Defaults to None. Returns: Tensor: forwarded results, has shape (bs, num_queries, dim). """ query = self.self_attn( query=query, key=query, value=query, query_pos=query_pos, key_pos=query_pos, attn_mask=self_attn_mask, **kwargs) query = self.norms[0](query) query = self.cross_attn( query=query, key=key, value=value, query_pos=query_pos, key_pos=key_pos, attn_mask=cross_attn_mask, key_padding_mask=key_padding_mask, **kwargs) query = self.norms[1](query) query = self.ffn(query) query = self.norms[2](query) return query
13,965
38.340845
79
py
ERD
ERD-main/mmdet/models/dense_heads/nasfcos_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmdet.models.dense_heads.fcos_head import FCOSHead from mmdet.registry import MODELS from mmdet.utils import OptMultiConfig @MODELS.register_module() class NASFCOSHead(FCOSHead): """Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_. It is quite similar with FCOS head, except for the searched structure of classification branch and bbox regression branch, where a structure of "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. strides (Sequence[int] or Sequence[Tuple[int, int]]): Strides of points in multiple feature levels. Defaults to (4, 8, 16, 32, 64). regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple level points. center_sampling (bool): If true, use center sampling. Defaults to False. center_sample_radius (float): Radius of center sampling. Defaults to 1.5. norm_on_bbox (bool): If true, normalize the regression targets with FPN strides. Defaults to False. centerness_on_reg (bool): If true, position centerness on the regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042. Defaults to False. conv_bias (bool or str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Defaults to "auto". loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. loss_centerness (:obj:`ConfigDict`, or dict): Config of centerness loss. norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and config norm layer. Defaults to ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], opitonal): Initialization config dict. 
""" # noqa: E501 def __init__(self, *args, init_cfg: OptMultiConfig = None, **kwargs) -> None: if init_cfg is None: init_cfg = [ dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']), dict( type='Normal', std=0.01, override=[ dict(name='conv_reg'), dict(name='conv_centerness'), dict( name='conv_cls', type='Normal', std=0.01, bias_prob=0.01) ]), ] super().__init__(*args, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" dconv3x3_config = dict( type='DCNv2', kernel_size=3, use_bias=True, deform_groups=2, padding=1) conv3x3_config = dict(type='Conv', kernel_size=3, padding=1) conv1x1_config = dict(type='Conv', kernel_size=1) self.arch_config = [ dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config ] self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i, op_ in enumerate(self.arch_config): op = copy.deepcopy(op_) chn = self.in_channels if i == 0 else self.feat_channels assert isinstance(op, dict) use_bias = op.pop('use_bias', False) padding = op.pop('padding', 0) kernel_size = op.pop('kernel_size') module = ConvModule( chn, self.feat_channels, kernel_size, stride=1, padding=padding, norm_cfg=self.norm_cfg, bias=use_bias, conv_cfg=op) self.cls_convs.append(copy.deepcopy(module)) self.reg_convs.append(copy.deepcopy(module)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
4,776
40.53913
113
py
ERD
ERD-main/mmdet/models/dense_heads/reppoints_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Sequence, Tuple import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import DeformConv2d from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptInstanceList from ..task_modules.prior_generators import MlvlPointGenerator from ..task_modules.samplers import PseudoSampler from ..utils import (filter_scores_and_topk, images_to_levels, multi_apply, unmap) from .anchor_free_head import AnchorFreeHead @MODELS.register_module() class RepPointsHead(AnchorFreeHead): """RepPoint head. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. point_feat_channels (int): Number of channels of points features. num_points (int): Number of points. gradient_mul (float): The multiplier to gradients from points refinement and recognition. point_strides (Sequence[int]): points strides. point_base_scale (int): bbox scale for assigning labels. loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox_init (:obj:`ConfigDict` or dict): Config of initial points loss. loss_bbox_refine (:obj:`ConfigDict` or dict): Config of points loss in refinement. use_grid_points (bool): If we use bounding box representation, the reppoints is represented as grid points on the bounding box. center_init (bool): Whether to use center point assignment. transform_method (str): The methods to transform RepPoints to bbox. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict]): Initialization config dict. """ # noqa: W605 def __init__(self, num_classes: int, in_channels: int, point_feat_channels: int = 256, num_points: int = 9, gradient_mul: float = 0.1, point_strides: Sequence[int] = [8, 16, 32, 64, 128], point_base_scale: int = 4, loss_cls: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_init: ConfigType = dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5), loss_bbox_refine: ConfigType = dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), use_grid_points: bool = False, center_init: bool = True, transform_method: str = 'moment', moment_mul: float = 0.01, init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='reppoints_cls_out', std=0.01, bias_prob=0.01)), **kwargs) -> None: self.num_points = num_points self.point_feat_channels = point_feat_channels self.use_grid_points = use_grid_points self.center_init = center_init # we use deform conv to extract points features self.dcn_kernel = int(np.sqrt(num_points)) self.dcn_pad = int((self.dcn_kernel - 1) / 2) assert self.dcn_kernel * self.dcn_kernel == num_points, \ 'The points number should be a square number.' assert self.dcn_kernel % 2 == 1, \ 'The points number should be an odd square number.' 
dcn_base = np.arange(-self.dcn_pad, self.dcn_pad + 1).astype(np.float64) dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) dcn_base_x = np.tile(dcn_base, self.dcn_kernel) dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( (-1)) self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) super().__init__( num_classes=num_classes, in_channels=in_channels, loss_cls=loss_cls, init_cfg=init_cfg, **kwargs) self.gradient_mul = gradient_mul self.point_base_scale = point_base_scale self.point_strides = point_strides self.prior_generator = MlvlPointGenerator( self.point_strides, offset=0.) if self.train_cfg: self.init_assigner = TASK_UTILS.build( self.train_cfg['init']['assigner']) self.refine_assigner = TASK_UTILS.build( self.train_cfg['refine']['assigner']) if self.train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler(context=self) self.transform_method = transform_method if self.transform_method == 'moment': self.moment_transfer = nn.Parameter( data=torch.zeros(2), requires_grad=True) self.moment_mul = moment_mul self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = self.num_classes else: self.cls_out_channels = self.num_classes + 1 self.loss_bbox_init = MODELS.build(loss_bbox_init) self.loss_bbox_refine = MODELS.build(loss_bbox_refine) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points self.reppoints_cls_conv = DeformConv2d(self.feat_channels, self.point_feat_channels, self.dcn_kernel, 1, self.dcn_pad) self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels, self.cls_out_channels, 1, 1, 0) self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels, self.point_feat_channels, 3, 1, 1) self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels, pts_out_dim, 1, 1, 0) self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels, self.point_feat_channels, self.dcn_kernel, 1, self.dcn_pad) self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels, pts_out_dim, 1, 1, 0) def points2bbox(self, pts: Tensor, y_first: bool = True) -> Tensor: """Converting the points set into bounding box. Args: pts (Tensor): the input points sets (fields), each points set (fields) is represented as 2n scalar. y_first (bool): if y_first=True, the point set is represented as [y1, x1, y2, x2 ... yn, xn], otherwise the point set is represented as [x1, y1, x2, y2 ... xn, yn]. Defaults to True. Returns: Tensor: each points set is converting to a bbox [x1, y1, x2, y2]. """ pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:]) pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1, ...] pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0, ...] 
if self.transform_method == 'minmax': bbox_left = pts_x.min(dim=1, keepdim=True)[0] bbox_right = pts_x.max(dim=1, keepdim=True)[0] bbox_up = pts_y.min(dim=1, keepdim=True)[0] bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], dim=1) elif self.transform_method == 'partial_minmax': pts_y = pts_y[:, :4, ...] pts_x = pts_x[:, :4, ...] bbox_left = pts_x.min(dim=1, keepdim=True)[0] bbox_right = pts_x.max(dim=1, keepdim=True)[0] bbox_up = pts_y.min(dim=1, keepdim=True)[0] bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], dim=1) elif self.transform_method == 'moment': pts_y_mean = pts_y.mean(dim=1, keepdim=True) pts_x_mean = pts_x.mean(dim=1, keepdim=True) pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True) pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True) moment_transfer = (self.moment_transfer * self.moment_mul) + ( self.moment_transfer.detach() * (1 - self.moment_mul)) moment_width_transfer = moment_transfer[0] moment_height_transfer = moment_transfer[1] half_width = pts_x_std * torch.exp(moment_width_transfer) half_height = pts_y_std * torch.exp(moment_height_transfer) bbox = torch.cat([ pts_x_mean - half_width, pts_y_mean - half_height, pts_x_mean + half_width, pts_y_mean + half_height ], dim=1) else: raise NotImplementedError return bbox def gen_grid_from_reg(self, reg: Tensor, previous_boxes: Tensor) -> Tuple[Tensor]: """Base on the previous bboxes and regression values, we compute the regressed bboxes and generate the grids on the bboxes. Args: reg (Tensor): the regression value to previous bboxes. previous_boxes (Tensor): previous bboxes. Returns: Tuple[Tensor]: generate grids on the regressed bboxes. """ b, _, h, w = reg.shape bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2. bwh = (previous_boxes[:, 2:, ...] - previous_boxes[:, :2, ...]).clamp(min=1e-6) grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp( reg[:, 2:, ...]) grid_wh = bwh * torch.exp(reg[:, 2:, ...]) grid_left = grid_topleft[:, [0], ...] grid_top = grid_topleft[:, [1], ...] grid_width = grid_wh[:, [0], ...] grid_height = grid_wh[:, [1], ...] intervel = torch.linspace(0., 1., self.dcn_kernel).view( 1, self.dcn_kernel, 1, 1).type_as(reg) grid_x = grid_left + grid_width * intervel grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1) grid_x = grid_x.view(b, -1, h, w) grid_y = grid_top + grid_height * intervel grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1) grid_y = grid_y.view(b, -1, h, w) grid_yx = torch.stack([grid_y, grid_x], dim=2) grid_yx = grid_yx.view(b, -1, h, w) regressed_bbox = torch.cat([ grid_left, grid_top, grid_left + grid_width, grid_top + grid_height ], 1) return grid_yx, regressed_bbox def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]: return multi_apply(self.forward_single, feats) def forward_single(self, x: Tensor) -> Tuple[Tensor]: """Forward feature map of a single FPN level.""" dcn_base_offset = self.dcn_base_offset.type_as(x) # If we use center_init, the initial reppoints is from center points. # If we use bounding bbox representation, the initial reppoints is # from regular grid placed on a pre-defined bbox. 
if self.use_grid_points or not self.center_init: scale = self.point_base_scale / 2 points_init = dcn_base_offset / dcn_base_offset.max() * scale bbox_init = x.new_tensor([-scale, -scale, scale, scale]).view(1, 4, 1, 1) else: points_init = 0 cls_feat = x pts_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: pts_feat = reg_conv(pts_feat) # initialize reppoints pts_out_init = self.reppoints_pts_init_out( self.relu(self.reppoints_pts_init_conv(pts_feat))) if self.use_grid_points: pts_out_init, bbox_out_init = self.gen_grid_from_reg( pts_out_init, bbox_init.detach()) else: pts_out_init = pts_out_init + points_init # refine and classify reppoints pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach( ) + self.gradient_mul * pts_out_init dcn_offset = pts_out_init_grad_mul - dcn_base_offset cls_out = self.reppoints_cls_out( self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset))) pts_out_refine = self.reppoints_pts_refine_out( self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset))) if self.use_grid_points: pts_out_refine, bbox_out_refine = self.gen_grid_from_reg( pts_out_refine, bbox_out_init.detach()) else: pts_out_refine = pts_out_refine + pts_out_init.detach() if self.training: return cls_out, pts_out_init, pts_out_refine else: return cls_out, self.points2bbox(pts_out_refine) def get_points(self, featmap_sizes: List[Tuple[int]], batch_img_metas: List[dict], device: str) -> tuple: """Get points according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. batch_img_metas (list[dict]): Image meta info. Returns: tuple: points of each image, valid flags of each image """ num_imgs = len(batch_img_metas) # since feature map sizes of all images are the same, we only compute # points center for one time multi_level_points = self.prior_generator.grid_priors( featmap_sizes, device=device, with_stride=True) points_list = [[point.clone() for point in multi_level_points] for _ in range(num_imgs)] # for each image, we compute valid flags of multi level grids valid_flag_list = [] for img_id, img_meta in enumerate(batch_img_metas): multi_level_flags = self.prior_generator.valid_flags( featmap_sizes, img_meta['pad_shape'], device=device) valid_flag_list.append(multi_level_flags) return points_list, valid_flag_list def centers_to_bboxes(self, point_list: List[Tensor]) -> List[Tensor]: """Get bboxes according to center points. Only used in :class:`MaxIoUAssigner`. 
""" bbox_list = [] for i_img, point in enumerate(point_list): bbox = [] for i_lvl in range(len(self.point_strides)): scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5 bbox_shift = torch.Tensor([-scale, -scale, scale, scale]).view(1, 4).type_as(point[0]) bbox_center = torch.cat( [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1) bbox.append(bbox_center + bbox_shift) bbox_list.append(bbox) return bbox_list def offset_to_pts(self, center_list: List[Tensor], pred_list: List[Tensor]) -> List[Tensor]: """Change from point offset to point coordinate.""" pts_list = [] for i_lvl in range(len(self.point_strides)): pts_lvl = [] for i_img in range(len(center_list)): pts_center = center_list[i_img][i_lvl][:, :2].repeat( 1, self.num_points) pts_shift = pred_list[i_lvl][i_img] yx_pts_shift = pts_shift.permute(1, 2, 0).view( -1, 2 * self.num_points) y_pts_shift = yx_pts_shift[..., 0::2] x_pts_shift = yx_pts_shift[..., 1::2] xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1) xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1) pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center pts_lvl.append(pts) pts_lvl = torch.stack(pts_lvl, 0) pts_list.append(pts_lvl) return pts_list def _get_targets_single(self, flat_proposals: Tensor, valid_flags: Tensor, gt_instances: InstanceData, gt_instances_ignore: InstanceData, stage: str = 'init', unmap_outputs: bool = True) -> tuple: """Compute corresponding GT box and classification targets for proposals. Args: flat_proposals (Tensor): Multi level points of a image. valid_flags (Tensor): Multi level valid flags of a image. gt_instances (InstanceData): It usually includes ``bboxes`` and ``labels`` attributes. gt_instances_ignore (InstanceData): It includes ``bboxes`` attribute data that is ignored during training and testing. stage (str): 'init' or 'refine'. Generate target for init stage or refine stage. Defaults to 'init'. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: - labels (Tensor): Labels of each level. - label_weights (Tensor): Label weights of each level. - bbox_targets (Tensor): BBox targets of each level. - bbox_weights (Tensor): BBox weights of each level. - pos_inds (Tensor): positive samples indexes. - neg_inds (Tensor): negative samples indexes. - sampling_result (:obj:`SamplingResult`): Sampling results. """ inside_flags = valid_flags if not inside_flags.any(): raise ValueError( 'There is no valid proposal inside the image boundary. 
Please ' 'check the image size.') # assign gt and sample proposals proposals = flat_proposals[inside_flags, :] pred_instances = InstanceData(priors=proposals) if stage == 'init': assigner = self.init_assigner pos_weight = self.train_cfg['init']['pos_weight'] else: assigner = self.refine_assigner pos_weight = self.train_cfg['refine']['pos_weight'] assign_result = assigner.assign(pred_instances, gt_instances, gt_instances_ignore) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_proposals = proposals.shape[0] bbox_gt = proposals.new_zeros([num_valid_proposals, 4]) pos_proposals = torch.zeros_like(proposals) proposals_weights = proposals.new_zeros([num_valid_proposals, 4]) labels = proposals.new_full((num_valid_proposals, ), self.num_classes, dtype=torch.long) label_weights = proposals.new_zeros( num_valid_proposals, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: bbox_gt[pos_inds, :] = sampling_result.pos_gt_bboxes pos_proposals[pos_inds, :] = proposals[pos_inds, :] proposals_weights[pos_inds, :] = 1.0 labels[pos_inds] = sampling_result.pos_gt_labels if pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of proposals if unmap_outputs: num_total_proposals = flat_proposals.size(0) labels = unmap( labels, num_total_proposals, inside_flags, fill=self.num_classes) # fill bg label label_weights = unmap(label_weights, num_total_proposals, inside_flags) bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags) pos_proposals = unmap(pos_proposals, num_total_proposals, inside_flags) proposals_weights = unmap(proposals_weights, num_total_proposals, inside_flags) return (labels, label_weights, bbox_gt, pos_proposals, proposals_weights, pos_inds, neg_inds, sampling_result) def get_targets(self, proposals_list: List[Tensor], valid_flag_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, stage: str = 'init', unmap_outputs: bool = True, return_sampling_results: bool = False) -> tuple: """Compute corresponding GT box and classification targets for proposals. Args: proposals_list (list[Tensor]): Multi level points/bboxes of each image. valid_flag_list (list[Tensor]): Multi level valid flags of each image. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. stage (str): 'init' or 'refine'. Generate target for init stage or refine stage. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. return_sampling_results (bool): Whether to return the sampling results. Defaults to False. Returns: tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. - bbox_gt_list (list[Tensor]): Ground truth bbox of each level. - proposals_list (list[Tensor]): Proposals(points/bboxes) of each level. - proposal_weights_list (list[Tensor]): Proposal weights of each level. - avg_factor (int): Average factor that is used to average the loss. 
When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. """ assert stage in ['init', 'refine'] num_imgs = len(batch_img_metas) assert len(proposals_list) == len(valid_flag_list) == num_imgs # points number of multi levels num_level_proposals = [points.size(0) for points in proposals_list[0]] # concat all level points and flags to a single tensor for i in range(num_imgs): assert len(proposals_list[i]) == len(valid_flag_list[i]) proposals_list[i] = torch.cat(proposals_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs (all_labels, all_label_weights, all_bbox_gt, all_proposals, all_proposal_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._get_targets_single, proposals_list, valid_flag_list, batch_gt_instances, batch_gt_instances_ignore, stage=stage, unmap_outputs=unmap_outputs) # sampled points of all images avg_refactor = sum( [results.avg_factor for results in sampling_results_list]) labels_list = images_to_levels(all_labels, num_level_proposals) label_weights_list = images_to_levels(all_label_weights, num_level_proposals) bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals) proposals_list = images_to_levels(all_proposals, num_level_proposals) proposal_weights_list = images_to_levels(all_proposal_weights, num_level_proposals) res = (labels_list, label_weights_list, bbox_gt_list, proposals_list, proposal_weights_list, avg_refactor) if return_sampling_results: res = res + (sampling_results_list, ) return res def loss_by_feat_single(self, cls_score: Tensor, pts_pred_init: Tensor, pts_pred_refine: Tensor, labels: Tensor, label_weights, bbox_gt_init: Tensor, bbox_weights_init: Tensor, bbox_gt_refine: Tensor, bbox_weights_refine: Tensor, stride: int, avg_factor_init: int, avg_factor_refine: int) -> Tuple[Tensor]: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_classes, h_i, w_i). pts_pred_init (Tensor): Points of shape (batch_size, h_i * w_i, num_points * 2). pts_pred_refine (Tensor): Points refined of shape (batch_size, h_i * w_i, num_points * 2). labels (Tensor): Ground truth class indices with shape (batch_size, h_i * w_i). label_weights (Tensor): Label weights of shape (batch_size, h_i * w_i). bbox_gt_init (Tensor): BBox regression targets in the init stage of shape (batch_size, h_i * w_i, 4). bbox_weights_init (Tensor): BBox regression loss weights in the init stage of shape (batch_size, h_i * w_i, 4). bbox_gt_refine (Tensor): BBox regression targets in the refine stage of shape (batch_size, h_i * w_i, 4). bbox_weights_refine (Tensor): BBox regression loss weights in the refine stage of shape (batch_size, h_i * w_i, 4). stride (int): Point stride. avg_factor_init (int): Average factor that is used to average the loss in the init stage. avg_factor_refine (int): Average factor that is used to average the loss in the refine stage. Returns: Tuple[Tensor]: loss components. 
""" # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) cls_score = cls_score.contiguous() loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor_refine) # points loss bbox_gt_init = bbox_gt_init.reshape(-1, 4) bbox_weights_init = bbox_weights_init.reshape(-1, 4) bbox_pred_init = self.points2bbox( pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False) bbox_gt_refine = bbox_gt_refine.reshape(-1, 4) bbox_weights_refine = bbox_weights_refine.reshape(-1, 4) bbox_pred_refine = self.points2bbox( pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False) normalize_term = self.point_base_scale * stride loss_pts_init = self.loss_bbox_init( bbox_pred_init / normalize_term, bbox_gt_init / normalize_term, bbox_weights_init, avg_factor=avg_factor_init) loss_pts_refine = self.loss_bbox_refine( bbox_pred_refine / normalize_term, bbox_gt_refine / normalize_term, bbox_weights_refine, avg_factor=avg_factor_refine) return loss_cls, loss_pts_init, loss_pts_refine def loss_by_feat( self, cls_scores: List[Tensor], pts_preds_init: List[Tensor], pts_preds_refine: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, of shape (batch_size, num_classes, h, w). pts_preds_init (list[Tensor]): Points for each scale level, each is a 3D-tensor, of shape (batch_size, h_i * w_i, num_points * 2). pts_preds_refine (list[Tensor]): Points refined for each scale level, each is a 3D-tensor, of shape (batch_size, h_i * w_i, num_points * 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device # target for initial stage center_list, valid_flag_list = self.get_points(featmap_sizes, batch_img_metas, device) pts_coordinate_preds_init = self.offset_to_pts(center_list, pts_preds_init) if self.train_cfg['init']['assigner']['type'] == 'PointAssigner': # Assign target for center list candidate_list = center_list else: # transform center list to bbox list and # assign target for bbox list bbox_list = self.centers_to_bboxes(center_list) candidate_list = bbox_list cls_reg_targets_init = self.get_targets( proposals_list=candidate_list, valid_flag_list=valid_flag_list, batch_gt_instances=batch_gt_instances, batch_img_metas=batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, stage='init', return_sampling_results=False) (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init, avg_factor_init) = cls_reg_targets_init # target for refinement stage center_list, valid_flag_list = self.get_points(featmap_sizes, batch_img_metas, device) pts_coordinate_preds_refine = self.offset_to_pts( center_list, pts_preds_refine) bbox_list = [] for i_img, center in enumerate(center_list): bbox = [] for i_lvl in range(len(pts_preds_refine)): bbox_preds_init = self.points2bbox( pts_preds_init[i_lvl].detach()) bbox_shift = bbox_preds_init * self.point_strides[i_lvl] bbox_center = torch.cat( [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1) bbox.append(bbox_center + bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4)) bbox_list.append(bbox) cls_reg_targets_refine = self.get_targets( proposals_list=bbox_list, valid_flag_list=valid_flag_list, batch_gt_instances=batch_gt_instances, batch_img_metas=batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, stage='refine', return_sampling_results=False) (labels_list, label_weights_list, bbox_gt_list_refine, candidate_list_refine, bbox_weights_list_refine, avg_factor_refine) = cls_reg_targets_refine # compute loss losses_cls, losses_pts_init, losses_pts_refine = multi_apply( self.loss_by_feat_single, cls_scores, pts_coordinate_preds_init, pts_coordinate_preds_refine, labels_list, label_weights_list, bbox_gt_list_init, bbox_weights_list_init, bbox_gt_list_refine, bbox_weights_list_refine, self.point_strides, avg_factor_init=avg_factor_init, avg_factor_refine=avg_factor_refine) loss_dict_all = { 'loss_cls': losses_cls, 'loss_pts_init': losses_pts_init, 'loss_pts_refine': losses_pts_refine } return loss_dict_all # Same as base_dense_head/_get_bboxes_single except self._bbox_decode def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform outputs of a single image into bbox predictions. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. RepPoints head does not need this value. mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 2). img_meta (dict): Image meta info. 
cfg (:obj:`ConfigDict`): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_score_list) == len(bbox_pred_list) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, priors) in enumerate( zip(cls_score_list, bbox_pred_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1)[:, :-1] # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self._bbox_decode(priors, bbox_pred, self.point_strides[level_idx], img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) results = InstanceData() results.bboxes = torch.cat(mlvl_bboxes) results.scores = torch.cat(mlvl_scores) results.labels = torch.cat(mlvl_labels) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) def _bbox_decode(self, points: Tensor, bbox_pred: Tensor, stride: int, max_shape: Tuple[int, int]) -> Tensor: """Decode the prediction to bounding box. Args: points (Tensor): shape (h_i * w_i, 2). bbox_pred (Tensor): shape (h_i * w_i, 4). stride (int): Stride for bbox_pred in different level. max_shape (Tuple[int, int]): image shape. Returns: Tensor: Bounding boxes decoded. """ bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1) bboxes = bbox_pred * stride + bbox_pos_center x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1]) y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0]) x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1]) y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0]) decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) return decoded_bboxes
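To make the point-to-box conversion concrete, here is a small worked sketch of points2bbox with the 'minmax' transform (all numbers are made up; the default 'moment' transform would instead scale the point standard deviations by the learned moment_transfer):

import torch

head = RepPointsHead(num_classes=80, in_channels=256,
                     transform_method='minmax')

# one spatial location with three (y, x) points: (1, 2), (3, 4), (5, 6)
pts = torch.tensor([1., 2., 3., 4., 5., 6.]).view(1, 6, 1, 1)
bbox = head.points2bbox(pts, y_first=True)
# bbox[0, :, 0, 0] -> [x1, y1, x2, y2] = [2., 1., 6., 5.]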
40,982
45.256208
79
py
ERD
ERD-main/mmdet/models/dense_heads/solov2_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from typing import List, Optional, Tuple import mmcv import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmengine.model import BaseModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.models.utils.misc import floordiv from mmdet.registry import MODELS from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType from ..layers import mask_matrix_nms from ..utils import center_of_mass, generate_coordinate, multi_apply from .solo_head import SOLOHead class MaskFeatModule(BaseModule): """SOLOv2 mask feature map branch used in `SOLOv2: Dynamic and Fast Instance Segmentation. <https://arxiv.org/pdf/2003.10152>`_ Args: in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels of the mask feature map branch. start_level (int): The starting feature map level from RPN that will be used to predict the mask feature map. end_level (int): The ending feature map level from rpn that will be used to predict the mask feature map. out_channels (int): Number of output channels of the mask feature map branch. This is the channel count of the mask feature map that to be dynamically convolved with the predicted kernel. mask_stride (int): Downsample factor of the mask feature map output. Defaults to 4. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__( self, in_channels: int, feat_channels: int, start_level: int, end_level: int, out_channels: int, mask_stride: int = 4, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, init_cfg: MultiConfig = [ dict(type='Normal', layer='Conv2d', std=0.01) ] ) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.feat_channels = feat_channels self.start_level = start_level self.end_level = end_level self.mask_stride = mask_stride assert start_level >= 0 and end_level >= start_level self.out_channels = out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self._init_layers() self.fp16_enabled = False def _init_layers(self) -> None: """Initialize layers of the head.""" self.convs_all_levels = nn.ModuleList() for i in range(self.start_level, self.end_level + 1): convs_per_level = nn.Sequential() if i == 0: convs_per_level.add_module( f'conv{i}', ConvModule( self.in_channels, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False)) self.convs_all_levels.append(convs_per_level) continue for j in range(i): if j == 0: if i == self.end_level: chn = self.in_channels + 2 else: chn = self.in_channels convs_per_level.add_module( f'conv{j}', ConvModule( chn, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False)) convs_per_level.add_module( f'upsample{j}', nn.Upsample( scale_factor=2, mode='bilinear', align_corners=False)) continue convs_per_level.add_module( f'conv{j}', ConvModule( self.feat_channels, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False)) convs_per_level.add_module( f'upsample{j}', nn.Upsample( scale_factor=2, mode='bilinear', align_corners=False)) self.convs_all_levels.append(convs_per_level) self.conv_pred = ConvModule( self.feat_channels, self.out_channels, 1, padding=0, conv_cfg=self.conv_cfg, 
norm_cfg=self.norm_cfg) def forward(self, x: Tuple[Tensor]) -> Tensor: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: Tensor: The predicted mask feature map. """ inputs = x[self.start_level:self.end_level + 1] assert len(inputs) == (self.end_level - self.start_level + 1) feature_add_all_level = self.convs_all_levels[0](inputs[0]) for i in range(1, len(inputs)): input_p = inputs[i] if i == len(inputs) - 1: coord_feat = generate_coordinate(input_p.size(), input_p.device) input_p = torch.cat([input_p, coord_feat], 1) feature_add_all_level = feature_add_all_level + \ self.convs_all_levels[i](input_p) feature_pred = self.conv_pred(feature_add_all_level) return feature_pred @MODELS.register_module() class SOLOV2Head(SOLOHead): """SOLOv2 mask head used in `SOLOv2: Dynamic and Fast Instance Segmentation. <https://arxiv.org/pdf/2003.10152>`_ Args: mask_feature_head (dict): Config of SOLOv2MaskFeatHead. dynamic_conv_size (int): Dynamic Conv kernel size. Defaults to 1. dcn_cfg (dict): Dcn conv configurations in kernel_convs and cls_conv. Defaults to None. dcn_apply_to_all_conv (bool): Whether to use dcn in every layer of kernel_convs and cls_convs, or only the last layer. It shall be set `True` for the normal version of SOLOv2 and `False` for the light-weight version. Defaults to True. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, *args, mask_feature_head: ConfigType, dynamic_conv_size: int = 1, dcn_cfg: OptConfigType = None, dcn_apply_to_all_conv: bool = True, init_cfg: MultiConfig = [ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], **kwargs) -> None: assert dcn_cfg is None or isinstance(dcn_cfg, dict) self.dcn_cfg = dcn_cfg self.with_dcn = dcn_cfg is not None self.dcn_apply_to_all_conv = dcn_apply_to_all_conv self.dynamic_conv_size = dynamic_conv_size mask_out_channels = mask_feature_head.get('out_channels') self.kernel_out_channels = \ mask_out_channels * self.dynamic_conv_size * self.dynamic_conv_size super().__init__(*args, init_cfg=init_cfg, **kwargs) # update the in_channels of mask_feature_head if mask_feature_head.get('in_channels', None) is not None: if mask_feature_head.in_channels != self.in_channels: warnings.warn('The `in_channels` of SOLOv2MaskFeatHead and ' 'SOLOv2Head should be same, changing ' 'mask_feature_head.in_channels to ' f'{self.in_channels}') mask_feature_head.update(in_channels=self.in_channels) else: mask_feature_head.update(in_channels=self.in_channels) self.mask_feature_head = MaskFeatModule(**mask_feature_head) self.mask_stride = self.mask_feature_head.mask_stride self.fp16_enabled = False def _init_layers(self) -> None: """Initialize layers of the head.""" self.cls_convs = nn.ModuleList() self.kernel_convs = nn.ModuleList() conv_cfg = None for i in range(self.stacked_convs): if self.with_dcn: if self.dcn_apply_to_all_conv: conv_cfg = self.dcn_cfg elif i == self.stacked_convs - 1: # light head conv_cfg = self.dcn_cfg chn = self.in_channels + 2 if i == 0 else self.feat_channels self.kernel_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.conv_cls 
= nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_kernel = nn.Conv2d( self.feat_channels, self.kernel_out_channels, 3, padding=1) def forward(self, x): """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores, mask prediction, and mask features. - mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel prediction. The kernel is used to generate instance segmentation masks by dynamic convolution. Each element in the list has shape (batch_size, kernel_out_channels, num_grids, num_grids). - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids, num_grids). - mask_feats (Tensor): Unified mask feature map used to generate instance segmentation masks by dynamic convolution. Has shape (batch_size, mask_out_channels, h, w). """ assert len(x) == self.num_levels mask_feats = self.mask_feature_head(x) ins_kernel_feats = self.resize_feats(x) mlvl_kernel_preds = [] mlvl_cls_preds = [] for i in range(self.num_levels): ins_kernel_feat = ins_kernel_feats[i] # ins branch # concat coord coord_feat = generate_coordinate(ins_kernel_feat.size(), ins_kernel_feat.device) ins_kernel_feat = torch.cat([ins_kernel_feat, coord_feat], 1) # kernel branch kernel_feat = ins_kernel_feat kernel_feat = F.interpolate( kernel_feat, size=self.num_grids[i], mode='bilinear', align_corners=False) cate_feat = kernel_feat[:, :-2, :, :] kernel_feat = kernel_feat.contiguous() for i, kernel_conv in enumerate(self.kernel_convs): kernel_feat = kernel_conv(kernel_feat) kernel_pred = self.conv_kernel(kernel_feat) # cate branch cate_feat = cate_feat.contiguous() for i, cls_conv in enumerate(self.cls_convs): cate_feat = cls_conv(cate_feat) cate_pred = self.conv_cls(cate_feat) mlvl_kernel_preds.append(kernel_pred) mlvl_cls_preds.append(cate_pred) return mlvl_kernel_preds, mlvl_cls_preds, mask_feats def _get_targets_single(self, gt_instances: InstanceData, featmap_sizes: Optional[list] = None) -> tuple: """Compute targets for predictions of single image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes``, ``labels``, and ``masks`` attributes. featmap_sizes (list[:obj:`torch.size`]): Size of each feature map from feature pyramid, each element means (feat_h, feat_w). Defaults to None. Returns: Tuple: Usually returns a tuple containing targets for predictions. - mlvl_pos_mask_targets (list[Tensor]): Each element represent the binary mask targets for positive points in this level, has shape (num_pos, out_h, out_w). - mlvl_labels (list[Tensor]): Each element is classification labels for all points in this level, has shape (num_grid, num_grid). - mlvl_pos_masks (list[Tensor]): Each element is a `BoolTensor` to represent whether the corresponding point in single level is positive, has shape (num_grid **2). - mlvl_pos_indexes (list[list]): Each element in the list contains the positive index in corresponding level, has shape (num_pos). 
""" gt_labels = gt_instances.labels device = gt_labels.device gt_bboxes = gt_instances.bboxes gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) gt_masks = gt_instances.masks.to_tensor( dtype=torch.bool, device=device) mlvl_pos_mask_targets = [] mlvl_pos_indexes = [] mlvl_labels = [] mlvl_pos_masks = [] for (lower_bound, upper_bound), num_grid \ in zip(self.scale_ranges, self.num_grids): mask_target = [] # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_index = [] labels = torch.zeros([num_grid, num_grid], dtype=torch.int64, device=device) + self.num_classes pos_mask = torch.zeros([num_grid**2], dtype=torch.bool, device=device) gt_inds = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(gt_inds) == 0: mlvl_pos_mask_targets.append( torch.zeros([0, featmap_sizes[0], featmap_sizes[1]], dtype=torch.uint8, device=device)) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) mlvl_pos_indexes.append([]) continue hit_gt_bboxes = gt_bboxes[gt_inds] hit_gt_labels = gt_labels[gt_inds] hit_gt_masks = gt_masks[gt_inds, ...] pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] - hit_gt_bboxes[:, 0]) * self.pos_scale pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] - hit_gt_bboxes[:, 1]) * self.pos_scale # Make sure hit_gt_masks has a value valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0 for gt_mask, gt_label, pos_h_range, pos_w_range, \ valid_mask_flag in \ zip(hit_gt_masks, hit_gt_labels, pos_h_ranges, pos_w_ranges, valid_mask_flags): if not valid_mask_flag: continue upsampled_size = (featmap_sizes[0] * self.mask_stride, featmap_sizes[1] * self.mask_stride) center_h, center_w = center_of_mass(gt_mask) coord_w = int( floordiv((center_w / upsampled_size[1]), (1. / num_grid), rounding_mode='trunc')) coord_h = int( floordiv((center_h / upsampled_size[0]), (1. / num_grid), rounding_mode='trunc')) # left, top, right, down top_box = max( 0, int( floordiv( (center_h - pos_h_range) / upsampled_size[0], (1. / num_grid), rounding_mode='trunc'))) down_box = min( num_grid - 1, int( floordiv( (center_h + pos_h_range) / upsampled_size[0], (1. / num_grid), rounding_mode='trunc'))) left_box = max( 0, int( floordiv( (center_w - pos_w_range) / upsampled_size[1], (1. / num_grid), rounding_mode='trunc'))) right_box = min( num_grid - 1, int( floordiv( (center_w + pos_w_range) / upsampled_size[1], (1. / num_grid), rounding_mode='trunc'))) top = max(top_box, coord_h - 1) down = min(down_box, coord_h + 1) left = max(coord_w - 1, left_box) right = min(right_box, coord_w + 1) labels[top:(down + 1), left:(right + 1)] = gt_label # ins gt_mask = np.uint8(gt_mask.cpu().numpy()) # Follow the original implementation, F.interpolate is # different from cv2 and opencv gt_mask = mmcv.imrescale(gt_mask, scale=1. / self.mask_stride) gt_mask = torch.from_numpy(gt_mask).to(device=device) for i in range(top, down + 1): for j in range(left, right + 1): index = int(i * num_grid + j) this_mask_target = torch.zeros( [featmap_sizes[0], featmap_sizes[1]], dtype=torch.uint8, device=device) this_mask_target[:gt_mask.shape[0], :gt_mask. 
shape[1]] = gt_mask mask_target.append(this_mask_target) pos_mask[index] = True pos_index.append(index) if len(mask_target) == 0: mask_target = torch.zeros( [0, featmap_sizes[0], featmap_sizes[1]], dtype=torch.uint8, device=device) else: mask_target = torch.stack(mask_target, 0) mlvl_pos_mask_targets.append(mask_target) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) mlvl_pos_indexes.append(pos_index) return (mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks, mlvl_pos_indexes) def loss_by_feat(self, mlvl_kernel_preds: List[Tensor], mlvl_cls_preds: List[Tensor], mask_feats: Tensor, batch_gt_instances: InstanceList, batch_img_metas: List[dict], **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel prediction. The kernel is used to generate instance segmentation masks by dynamic convolution. Each element in the list has shape (batch_size, kernel_out_channels, num_grids, num_grids). mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids, num_grids). mask_feats (Tensor): Unified mask feature map used to generate instance segmentation masks by dynamic convolution. Has shape (batch_size, mask_out_channels, h, w). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = mask_feats.size()[-2:] pos_mask_targets, labels, pos_masks, pos_indexes = multi_apply( self._get_targets_single, batch_gt_instances, featmap_sizes=featmap_sizes) mlvl_mask_targets = [ torch.cat(lvl_mask_targets, 0) for lvl_mask_targets in zip(*pos_mask_targets) ] mlvl_pos_kernel_preds = [] for lvl_kernel_preds, lvl_pos_indexes in zip(mlvl_kernel_preds, zip(*pos_indexes)): lvl_pos_kernel_preds = [] for img_lvl_kernel_preds, img_lvl_pos_indexes in zip( lvl_kernel_preds, lvl_pos_indexes): img_lvl_pos_kernel_preds = img_lvl_kernel_preds.view( img_lvl_kernel_preds.shape[0], -1)[:, img_lvl_pos_indexes] lvl_pos_kernel_preds.append(img_lvl_pos_kernel_preds) mlvl_pos_kernel_preds.append(lvl_pos_kernel_preds) # make multilevel mlvl_mask_pred mlvl_mask_preds = [] for lvl_pos_kernel_preds in mlvl_pos_kernel_preds: lvl_mask_preds = [] for img_id, img_lvl_pos_kernel_pred in enumerate( lvl_pos_kernel_preds): if img_lvl_pos_kernel_pred.size()[-1] == 0: continue img_mask_feats = mask_feats[[img_id]] h, w = img_mask_feats.shape[-2:] num_kernel = img_lvl_pos_kernel_pred.shape[1] img_lvl_mask_pred = F.conv2d( img_mask_feats, img_lvl_pos_kernel_pred.permute(1, 0).view( num_kernel, -1, self.dynamic_conv_size, self.dynamic_conv_size), stride=1).view(-1, h, w) lvl_mask_preds.append(img_lvl_mask_pred) if len(lvl_mask_preds) == 0: lvl_mask_preds = None else: lvl_mask_preds = torch.cat(lvl_mask_preds, 0) mlvl_mask_preds.append(lvl_mask_preds) # dice loss num_pos = 0 for img_pos_masks in pos_masks: for lvl_img_pos_masks in img_pos_masks: # Fix `Tensor` object has no attribute `count_nonzero()` # in PyTorch 1.6, the type of `lvl_img_pos_masks` # should be `torch.bool`. 
num_pos += lvl_img_pos_masks.nonzero().numel() loss_mask = [] for lvl_mask_preds, lvl_mask_targets in zip(mlvl_mask_preds, mlvl_mask_targets): if lvl_mask_preds is None: continue loss_mask.append( self.loss_mask( lvl_mask_preds, lvl_mask_targets, reduction_override='none')) if num_pos > 0: loss_mask = torch.cat(loss_mask).sum() / num_pos else: loss_mask = mask_feats.sum() * 0 # cate flatten_labels = [ torch.cat( [img_lvl_labels.flatten() for img_lvl_labels in lvl_labels]) for lvl_labels in zip(*labels) ] flatten_labels = torch.cat(flatten_labels) flatten_cls_preds = [ lvl_cls_preds.permute(0, 2, 3, 1).reshape(-1, self.num_classes) for lvl_cls_preds in mlvl_cls_preds ] flatten_cls_preds = torch.cat(flatten_cls_preds) loss_cls = self.loss_cls( flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) return dict(loss_mask=loss_mask, loss_cls=loss_cls) def predict_by_feat(self, mlvl_kernel_preds: List[Tensor], mlvl_cls_scores: List[Tensor], mask_feats: Tensor, batch_img_metas: List[dict], **kwargs) -> InstanceList: """Transform a batch of output features extracted from the head into mask results. Args: mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel prediction. The kernel is used to generate instance segmentation masks by dynamic convolution. Each element in the list has shape (batch_size, kernel_out_channels, num_grids, num_grids). mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids, num_grids). mask_feats (Tensor): Unified mask feature map used to generate instance segmentation masks by dynamic convolution. Has shape (batch_size, mask_out_channels, h, w). batch_img_metas (list[dict]): Meta information of all images. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ num_levels = len(mlvl_cls_scores) assert len(mlvl_kernel_preds) == len(mlvl_cls_scores) for lvl in range(num_levels): cls_scores = mlvl_cls_scores[lvl] cls_scores = cls_scores.sigmoid() local_max = F.max_pool2d(cls_scores, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_scores cls_scores = cls_scores * keep_mask mlvl_cls_scores[lvl] = cls_scores.permute(0, 2, 3, 1) result_list = [] for img_id in range(len(batch_img_metas)): img_cls_pred = [ mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels) for lvl in range(num_levels) ] img_mask_feats = mask_feats[[img_id]] img_kernel_pred = [ mlvl_kernel_preds[lvl][img_id].permute(1, 2, 0).view( -1, self.kernel_out_channels) for lvl in range(num_levels) ] img_cls_pred = torch.cat(img_cls_pred, dim=0) img_kernel_pred = torch.cat(img_kernel_pred, dim=0) result = self._predict_by_feat_single( img_kernel_pred, img_cls_pred, img_mask_feats, img_meta=batch_img_metas[img_id]) result_list.append(result) return result_list def _predict_by_feat_single(self, kernel_preds: Tensor, cls_scores: Tensor, mask_feats: Tensor, img_meta: dict, cfg: OptConfigType = None) -> InstanceData: """Transform a single image's features extracted from the head into mask results. Args: kernel_preds (Tensor): Dynamic kernel prediction of all points in single image, has shape (num_points, kernel_out_channels). cls_scores (Tensor): Classification score of all points in single image, has shape (num_points, num_classes). 
mask_feats (Tensor): Mask prediction of all points in single image, has shape (num_points, feat_h, feat_w). img_meta (dict): Meta information of corresponding image. cfg (dict, optional): Config used in test phase. Defaults to None. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ def empty_results(cls_scores, ori_shape): """Generate a empty results.""" results = InstanceData() results.scores = cls_scores.new_ones(0) results.masks = cls_scores.new_zeros(0, *ori_shape) results.labels = cls_scores.new_ones(0) results.bboxes = cls_scores.new_zeros(0, 4) return results cfg = self.test_cfg if cfg is None else cfg assert len(kernel_preds) == len(cls_scores) featmap_size = mask_feats.size()[-2:] # overall info h, w = img_meta['img_shape'][:2] upsampled_size = (featmap_size[0] * self.mask_stride, featmap_size[1] * self.mask_stride) # process. score_mask = (cls_scores > cfg.score_thr) cls_scores = cls_scores[score_mask] if len(cls_scores) == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) # cate_labels & kernel_preds inds = score_mask.nonzero() cls_labels = inds[:, 1] kernel_preds = kernel_preds[inds[:, 0]] # trans vector. lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0) strides = kernel_preds.new_ones(lvl_interval[-1]) strides[:lvl_interval[0]] *= self.strides[0] for lvl in range(1, self.num_levels): strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= self.strides[lvl] strides = strides[inds[:, 0]] # mask encoding. kernel_preds = kernel_preds.view( kernel_preds.size(0), -1, self.dynamic_conv_size, self.dynamic_conv_size) mask_preds = F.conv2d( mask_feats, kernel_preds, stride=1).squeeze(0).sigmoid() # mask. masks = mask_preds > cfg.mask_thr sum_masks = masks.sum((1, 2)).float() keep = sum_masks > strides if keep.sum() == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) masks = masks[keep] mask_preds = mask_preds[keep] sum_masks = sum_masks[keep] cls_scores = cls_scores[keep] cls_labels = cls_labels[keep] # maskness. mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks cls_scores *= mask_scores scores, labels, _, keep_inds = mask_matrix_nms( masks, cls_labels, cls_scores, mask_area=sum_masks, nms_pre=cfg.nms_pre, max_num=cfg.max_per_img, kernel=cfg.kernel, sigma=cfg.sigma, filter_thr=cfg.filter_thr) if len(keep_inds) == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) mask_preds = mask_preds[keep_inds] mask_preds = F.interpolate( mask_preds.unsqueeze(0), size=upsampled_size, mode='bilinear', align_corners=False)[:, :, :h, :w] mask_preds = F.interpolate( mask_preds, size=img_meta['ori_shape'][:2], mode='bilinear', align_corners=False).squeeze(0) masks = mask_preds > cfg.mask_thr results = InstanceData() results.masks = masks results.labels = labels results.scores = scores # create an empty bbox in InstanceData to avoid bugs when # calculating metrics. results.bboxes = results.scores.new_zeros(len(scores), 4) return results
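The mask branch above is driven entirely by dynamic convolution: every kept grid cell contributes one flattened kernel, and that kernel is applied as a single-output convolution over the shared mask feature map. A minimal sketch of that step outside the head (the function name, shapes and the 1x1 kernel size are illustrative assumptions, not the library API):

import torch
import torch.nn.functional as F

def dynamic_mask_pred(mask_feats, kernel_preds, dynamic_conv_size=1):
    # mask_feats:   (1, C, H, W)  unified mask features of one image
    # kernel_preds: (N, C * k * k) one flattened dynamic kernel per instance
    n = kernel_preds.size(0)
    weight = kernel_preds.view(n, -1, dynamic_conv_size, dynamic_conv_size)
    # each instance kernel acts as one output channel of a plain conv,
    # mirroring the F.conv2d(mask_feats, kernel_preds, stride=1) calls above
    return F.conv2d(mask_feats, weight, stride=1).squeeze(0).sigmoid()

# toy usage: 3 instances, 8 mask channels, 1x1 dynamic kernels
feats = torch.randn(1, 8, 20, 20)
kernels = torch.randn(3, 8)
masks = dynamic_mask_pred(feats, kernels)   # (3, 20, 20), values in (0, 1)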
33,478
40.84875
79
py
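After thresholding, the head above rescores overlapping instances with mask_matrix_nms instead of hard suppression. The decay rule behind that op can be sketched in a few lines; the following is a simplified, class-agnostic reconstruction with a gaussian kernel (not the library implementation), and it assumes the masks are already sorted by descending score:

import torch

def matrix_nms_decay(masks, scores, sigma=2.0):
    # masks: (N, H, W) bool, sorted by descending score; scores: (N,)
    n = masks.size(0)
    flat = masks.reshape(n, -1).float()
    inter = flat @ flat.t()
    areas = flat.sum(1)
    iou = inter / (areas[:, None] + areas[None, :] - inter).clamp(min=1e-6)
    iou = iou.triu(diagonal=1)            # iou[i, j]: higher-scored i vs lower-scored j
    # how strongly each mask i was itself overlapped by higher-scored masks
    compensate = iou.max(dim=0).values    # (N,)
    decay = torch.exp(-sigma * iou ** 2) / \
        torch.exp(-sigma * compensate[:, None] ** 2)
    decay = decay.min(dim=0).values       # most pessimistic decay per mask
    return scores * decay                 # decayed scores, not re-sorted here

# toy usage
m = torch.zeros(3, 16, 16, dtype=torch.bool)
m[0, :8, :8] = True
m[1, 1:9, 1:9] = True      # heavily overlaps mask 0 -> strongly decayed
m[2, 10:, 10:] = True      # disjoint -> left (almost) untouched
print(matrix_nms_decay(m, torch.tensor([0.9, 0.8, 0.7])))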
ERD
ERD-main/mmdet/models/dense_heads/cascade_rpn_head.py
# Copyright (c) OpenMMLab. All rights reserved. from __future__ import division import copy from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from mmcv.ops import DeformConv2d from mmengine.config import ConfigDict from mmengine.model import BaseModule, ModuleList from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import SampleList from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptInstanceList, OptMultiConfig) from ..task_modules.assigners import RegionAssigner from ..task_modules.samplers import PseudoSampler from ..utils import (images_to_levels, multi_apply, select_single_mlvl, unpack_gt_instances) from .base_dense_head import BaseDenseHead from .rpn_head import RPNHead class AdaptiveConv(BaseModule): """AdaptiveConv used to adapt the sampling location with the anchors. Args: in_channels (int): Number of channels in the input image. out_channels (int): Number of channels produced by the convolution. kernel_size (int or tuple[int]): Size of the conv kernel. Defaults to 3. stride (int or tuple[int]): Stride of the convolution. Defaults to 1. padding (int or tuple[int]): Zero-padding added to both sides of the input. Defaults to 1. dilation (int or tuple[int]): Spacing between kernel elements. Defaults to 3. groups (int): Number of blocked connections from input channels to output channels. Defaults to 1. bias (bool): If set True, adds a learnable bias to the output. Defaults to False. adapt_type (str): Type of adaptive conv, can be either ``offset`` (arbitrary anchors) or 'dilation' (uniform anchor). Defaults to 'dilation'. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \ list[dict]): Initialization config dict. """ def __init__( self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int]] = 3, stride: Union[int, Tuple[int]] = 1, padding: Union[int, Tuple[int]] = 1, dilation: Union[int, Tuple[int]] = 3, groups: int = 1, bias: bool = False, adapt_type: str = 'dilation', init_cfg: MultiConfig = dict( type='Normal', std=0.01, override=dict(name='conv')) ) -> None: super().__init__(init_cfg=init_cfg) assert adapt_type in ['offset', 'dilation'] self.adapt_type = adapt_type assert kernel_size == 3, 'Adaptive conv only supports kernels 3' if self.adapt_type == 'offset': assert stride == 1 and padding == 1 and groups == 1, \ 'Adaptive conv offset mode only supports padding: {1}, ' \ f'stride: {1}, groups: {1}' self.conv = DeformConv2d( in_channels, out_channels, kernel_size, padding=padding, stride=stride, groups=groups, bias=bias) else: self.conv = nn.Conv2d( in_channels, out_channels, kernel_size, padding=dilation, dilation=dilation) def forward(self, x: Tensor, offset: Tensor) -> Tensor: """Forward function.""" if self.adapt_type == 'offset': N, _, H, W = x.shape assert offset is not None assert H * W == offset.shape[1] # reshape [N, NA, 18] to (N, 18, H, W) offset = offset.permute(0, 2, 1).reshape(N, -1, H, W) offset = offset.contiguous() x = self.conv(x, offset) else: assert offset is None x = self.conv(x) return x @MODELS.register_module() class StageCascadeRPNHead(RPNHead): """Stage of CascadeRPNHead. Args: in_channels (int): Number of channels in the input feature map. anchor_generator (:obj:`ConfigDict` or dict): anchor generator config. adapt_cfg (:obj:`ConfigDict` or dict): adaptation config. bridged_feature (bool): whether update rpn feature. Defaults to False. 
with_cls (bool): whether use classification branch. Defaults to True. init_cfg :obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or list[dict], optional): Initialization config dict. Defaults to None. """ def __init__(self, in_channels: int, anchor_generator: ConfigType = dict( type='AnchorGenerator', scales=[8], ratios=[1.0], strides=[4, 8, 16, 32, 64]), adapt_cfg: ConfigType = dict(type='dilation', dilation=3), bridged_feature: bool = False, with_cls: bool = True, init_cfg: OptMultiConfig = None, **kwargs) -> None: self.with_cls = with_cls self.anchor_strides = anchor_generator['strides'] self.anchor_scales = anchor_generator['scales'] self.bridged_feature = bridged_feature self.adapt_cfg = adapt_cfg super().__init__( in_channels=in_channels, anchor_generator=anchor_generator, init_cfg=init_cfg, **kwargs) # override sampling and sampler if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) # use PseudoSampler when sampling is False if self.train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler(context=self) if init_cfg is None: self.init_cfg = dict( type='Normal', std=0.01, override=[dict(name='rpn_reg')]) if self.with_cls: self.init_cfg['override'].append(dict(name='rpn_cls')) def _init_layers(self) -> None: """Init layers of a CascadeRPN stage.""" adapt_cfg = copy.deepcopy(self.adapt_cfg) adapt_cfg['adapt_type'] = adapt_cfg.pop('type') self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels, **adapt_cfg) if self.with_cls: self.rpn_cls = nn.Conv2d(self.feat_channels, self.num_anchors * self.cls_out_channels, 1) self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) self.relu = nn.ReLU(inplace=True) def forward_single(self, x: Tensor, offset: Tensor) -> Tuple[Tensor]: """Forward function of single scale.""" bridged_x = x x = self.relu(self.rpn_conv(x, offset)) if self.bridged_feature: bridged_x = x # update feature cls_score = self.rpn_cls(x) if self.with_cls else None bbox_pred = self.rpn_reg(x) return bridged_x, cls_score, bbox_pred def forward( self, feats: List[Tensor], offset_list: Optional[List[Tensor]] = None) -> Tuple[List[Tensor]]: """Forward function.""" if offset_list is None: offset_list = [None for _ in range(len(feats))] return multi_apply(self.forward_single, feats, offset_list) def _region_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: InstanceData, featmap_sizes: List[Tuple[int, int]], num_level_anchors: List[int]) -> tuple: """Get anchor targets based on region for single level. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors, 4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors, ). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. featmap_sizes (list[Tuple[int, int]]): Feature map size each level. num_level_anchors (list[int]): The number of anchors in each level. Returns: tuple: - labels (Tensor): Labels of each level. 
- label_weights (Tensor): Label weights of each level. - bbox_targets (Tensor): BBox targets of each level. - bbox_weights (Tensor): BBox weights of each level. - pos_inds (Tensor): positive samples indexes. - neg_inds (Tensor): negative samples indexes. - sampling_result (:obj:`SamplingResult`): Sampling results. """ pred_instances = InstanceData() pred_instances.priors = flat_anchors pred_instances.valid_flags = valid_flags assign_result = self.assigner.assign( pred_instances, gt_instances, img_meta, featmap_sizes, num_level_anchors, self.anchor_scales[0], self.anchor_strides, gt_instances_ignore=gt_instances_ignore, allowed_border=self.train_cfg['allowed_border']) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_anchors = flat_anchors.shape[0] bbox_targets = torch.zeros_like(flat_anchors) bbox_weights = torch.zeros_like(flat_anchors) labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long) label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) else: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def region_targets( self, anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], featmap_sizes: List[Tuple[int, int]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, return_sampling_results: bool = False, ) -> tuple: """Compute regression and classification targets for anchors when using RegionAssigner. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. featmap_sizes (list[Tuple[int, int]]): Feature map size each level. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. - bbox_targets_list (list[Tensor]): BBox targets of each level. - bbox_weights_list (list[Tensor]): BBox weights of each level. - avg_factor (int): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using ``PseudoSampler``, ``avg_factor`` is usually equal to the number of positive priors. 
""" num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors to a single tensor concat_anchor_list = [] concat_valid_flag_list = [] for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) concat_anchor_list.append(torch.cat(anchor_list[i])) concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) # compute targets for each image (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._region_targets_single, concat_anchor_list, concat_valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, featmap_sizes=featmap_sizes, num_level_anchors=num_level_anchors) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) res = (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) if return_sampling_results: res = res + (sampling_results_list, ) return res def get_targets( self, anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], featmap_sizes: List[Tuple[int, int]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, return_sampling_results: bool = False, ) -> tuple: """Compute regression and classification targets for anchors. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. featmap_sizes (list[Tuple[int, int]]): Feature map size each level. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. return_sampling_results (bool): Whether to return the sampling results. Defaults to False. Returns: tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. - bbox_targets_list (list[Tensor]): BBox targets of each level. - bbox_weights_list (list[Tensor]): BBox weights of each level. - avg_factor (int): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using ``PseudoSampler``, ``avg_factor`` is usually equal to the number of positive priors. 
""" if isinstance(self.assigner, RegionAssigner): cls_reg_targets = self.region_targets( anchor_list, valid_flag_list, featmap_sizes, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, return_sampling_results=return_sampling_results) else: cls_reg_targets = super().get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, return_sampling_results=return_sampling_results) return cls_reg_targets def anchor_offset(self, anchor_list: List[List[Tensor]], anchor_strides: List[int], featmap_sizes: List[Tuple[int, int]]) -> List[Tensor]: """ Get offset for deformable conv based on anchor shape NOTE: currently support deformable kernel_size=3 and dilation=1 Args: anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of multi-level anchors anchor_strides (list[int]): anchor stride of each level Returns: list[tensor]: offset of DeformConv kernel with shapes of [NLVL, NA, 2, 18]. """ def _shape_offset(anchors, stride, ks=3, dilation=1): # currently support kernel_size=3 and dilation=1 assert ks == 3 and dilation == 1 pad = (ks - 1) // 2 idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device) yy, xx = torch.meshgrid(idx, idx) # return order matters xx = xx.reshape(-1) yy = yy.reshape(-1) w = (anchors[:, 2] - anchors[:, 0]) / stride h = (anchors[:, 3] - anchors[:, 1]) / stride w = w / (ks - 1) - dilation h = h / (ks - 1) - dilation offset_x = w[:, None] * xx # (NA, ks**2) offset_y = h[:, None] * yy # (NA, ks**2) return offset_x, offset_y def _ctr_offset(anchors, stride, featmap_size): feat_h, feat_w = featmap_size assert len(anchors) == feat_h * feat_w x = (anchors[:, 0] + anchors[:, 2]) * 0.5 y = (anchors[:, 1] + anchors[:, 3]) * 0.5 # compute centers on feature map x = x / stride y = y / stride # compute predefine centers xx = torch.arange(0, feat_w, device=anchors.device) yy = torch.arange(0, feat_h, device=anchors.device) yy, xx = torch.meshgrid(yy, xx) xx = xx.reshape(-1).type_as(x) yy = yy.reshape(-1).type_as(y) offset_x = x - xx # (NA, ) offset_y = y - yy # (NA, ) return offset_x, offset_y num_imgs = len(anchor_list) num_lvls = len(anchor_list[0]) dtype = anchor_list[0][0].dtype device = anchor_list[0][0].device num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] offset_list = [] for i in range(num_imgs): mlvl_offset = [] for lvl in range(num_lvls): c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl], anchor_strides[lvl], featmap_sizes[lvl]) s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl], anchor_strides[lvl]) # offset = ctr_offset + shape_offset offset_x = s_offset_x + c_offset_x[:, None] offset_y = s_offset_y + c_offset_y[:, None] # offset order (y0, x0, y1, x2, .., y8, x8, y9, x9) offset = torch.stack([offset_y, offset_x], dim=-1) offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2] mlvl_offset.append(offset) offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2] offset_list = images_to_levels(offset_list, num_level_anchors) return offset_list def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, anchors: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, avg_factor: int) -> tuple: """Loss function on single scale.""" # classification loss if self.with_cls: labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, 
avg_factor=avg_factor) # regression loss bbox_targets = bbox_targets.reshape(-1, 4) bbox_weights = bbox_weights.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. anchors = anchors.reshape(-1, 4) bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) loss_reg = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor) if self.with_cls: return loss_cls, loss_reg return None, loss_reg def loss_by_feat( self, anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Compute losses of the head. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds] cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, featmap_sizes, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, return_sampling_results=True) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor, sampling_results_list) = cls_reg_targets if not sampling_results_list[0].avg_factor_with_neg: # 200 is hard-coded average factor, # which follows guided anchoring. avg_factor = sum([label.numel() for label in labels_list]) / 200.0 # change per image, per level anchor_list to per_level, per_image mlvl_anchor_list = list(zip(*anchor_list)) # concat mlvl_anchor_list mlvl_anchor_list = [ torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list ] losses = multi_apply( self.loss_by_feat_single, cls_scores, bbox_preds, mlvl_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor=avg_factor) if self.with_cls: return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1]) return dict(loss_rpn_reg=losses[1]) def predict_by_feat(self, anchor_list: List[List[Tensor]], cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_img_metas: List[dict], cfg: Optional[ConfigDict] = None, rescale: bool = False) -> InstanceList: """Get proposal predict. Overriding to enable input ``anchor_list`` from outside. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. 
cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). batch_img_metas (list[dict], Optional): Image meta info. cfg (:obj:`ConfigDict`, optional): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(cls_scores) == len(bbox_preds) result_list = [] for img_id in range(len(batch_img_metas)): cls_score_list = select_single_mlvl(cls_scores, img_id) bbox_pred_list = select_single_mlvl(bbox_preds, img_id) proposals = self._predict_by_feat_single( cls_scores=cls_score_list, bbox_preds=bbox_pred_list, mlvl_anchors=anchor_list[img_id], img_meta=batch_img_metas[img_id], cfg=cfg, rescale=rescale) result_list.append(proposals) return result_list def _predict_by_feat_single(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], mlvl_anchors: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False) -> InstanceData: """Transform outputs of a single image into bbox predictions. Args: cls_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_anchors * 4, H, W). mlvl_anchors (list[Tensor]): Box reference from all scale levels of a single image, each item has shape (num_total_anchors, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). cfg (:obj:`ConfigDict`): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) # bboxes from different level should be independent during NMS, # level_ids are used as labels for batched NMS to separate them level_ids = [] mlvl_scores = [] mlvl_bbox_preds = [] mlvl_valid_anchors = [] nms_pre = cfg.get('nms_pre', -1) for idx in range(len(cls_scores)): rpn_cls_score = cls_scores[idx] rpn_bbox_pred = bbox_preds[idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) # We set FG labels to [0, num_class-1] and BG label to # num_class in RPN head since mmdet v2.5, which is unified to # be consistent with other head since mmdet v2.0. In mmdet v2.0 # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. scores = rpn_cls_score.softmax(dim=1)[:, 0] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) anchors = mlvl_anchors[idx] if 0 < nms_pre < scores.shape[0]: # sort is faster than topk # _, topk_inds = scores.topk(cfg.nms_pre) ranked_scores, rank_inds = scores.sort(descending=True) topk_inds = rank_inds[:nms_pre] scores = ranked_scores[:nms_pre] rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] mlvl_scores.append(scores) mlvl_bbox_preds.append(rpn_bbox_pred) mlvl_valid_anchors.append(anchors) level_ids.append( scores.new_full((scores.size(0), ), idx, dtype=torch.long)) anchors = torch.cat(mlvl_valid_anchors) rpn_bbox_pred = torch.cat(mlvl_bbox_preds) bboxes = self.bbox_coder.decode( anchors, rpn_bbox_pred, max_shape=img_meta['img_shape']) proposals = InstanceData() proposals.bboxes = bboxes proposals.scores = torch.cat(mlvl_scores) proposals.level_ids = torch.cat(level_ids) return self._bbox_post_process( results=proposals, cfg=cfg, rescale=rescale, img_meta=img_meta) def refine_bboxes(self, anchor_list: List[List[Tensor]], bbox_preds: List[Tensor], img_metas: List[dict]) -> List[List[Tensor]]: """Refine bboxes through stages.""" num_levels = len(bbox_preds) new_anchor_list = [] for img_id in range(len(img_metas)): mlvl_anchors = [] for i in range(num_levels): bbox_pred = bbox_preds[i][img_id].detach() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) img_shape = img_metas[img_id]['img_shape'] bboxes = self.bbox_coder.decode(anchor_list[img_id][i], bbox_pred, img_shape) mlvl_anchors.append(bboxes) new_anchor_list.append(mlvl_anchors) return new_anchor_list def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection head on the features of the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: dict: A dictionary of loss components. 
""" outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, _, batch_img_metas = outputs featmap_sizes = [featmap.size()[-2:] for featmap in x] device = x[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) if self.adapt_cfg['type'] == 'offset': offset_list = self.anchor_offset(anchor_list, self.anchor_strides, featmap_sizes) else: offset_list = None x, cls_score, bbox_pred = self(x, offset_list) rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, bbox_pred, batch_gt_instances, batch_img_metas) losses = self.loss_by_feat(*rpn_loss_inputs) return losses def loss_and_predict( self, x: Tuple[Tensor], batch_data_samples: SampleList, proposal_cfg: Optional[ConfigDict] = None, ) -> Tuple[dict, InstanceList]: """Perform forward propagation of the head, then calculate loss and predictions from the features and data samples. Args: x (tuple[Tensor]): Features from FPN. batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. proposal_cfg (:obj`ConfigDict`, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. Returns: tuple: the return value is a tuple contains: - losses: (dict[str, Tensor]): A dictionary of loss components. - predictions (list[:obj:`InstanceData`]): Detection results of each image after the post process. """ outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, _, batch_img_metas = outputs featmap_sizes = [featmap.size()[-2:] for featmap in x] device = x[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) if self.adapt_cfg['type'] == 'offset': offset_list = self.anchor_offset(anchor_list, self.anchor_strides, featmap_sizes) else: offset_list = None x, cls_score, bbox_pred = self(x, offset_list) rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, bbox_pred, batch_gt_instances, batch_img_metas) losses = self.loss_by_feat(*rpn_loss_inputs) predictions = self.predict_by_feat( anchor_list, cls_score, bbox_pred, batch_img_metas=batch_img_metas, cfg=proposal_cfg) return losses, predictions def predict(self, x: Tuple[Tensor], batch_data_samples: SampleList, rescale: bool = False) -> InstanceList: """Perform forward propagation of the detection head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. 
""" batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] featmap_sizes = [featmap.size()[-2:] for featmap in x] device = x[0].device anchor_list, _ = self.get_anchors( featmap_sizes, batch_img_metas, device=device) if self.adapt_cfg['type'] == 'offset': offset_list = self.anchor_offset(anchor_list, self.anchor_strides, featmap_sizes) else: offset_list = None x, cls_score, bbox_pred = self(x, offset_list) predictions = self.stages[-1].predict_by_feat( anchor_list, cls_score, bbox_pred, batch_img_metas=batch_img_metas, rescale=rescale) return predictions @MODELS.register_module() class CascadeRPNHead(BaseDenseHead): """The CascadeRPNHead will predict more accurate region proposals, which is required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN consists of a sequence of RPNStage to progressively improve the accuracy of the detected proposals. More details can be found in ``https://arxiv.org/abs/1909.06720``. Args: num_stages (int): number of CascadeRPN stages. stages (list[:obj:`ConfigDict` or dict]): list of configs to build the stages. train_cfg (list[:obj:`ConfigDict` or dict]): list of configs at training time each stage. test_cfg (:obj:`ConfigDict` or dict): config at testing time. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \ list[dict]): Initialization config dict. """ def __init__(self, num_classes: int, num_stages: int, stages: List[ConfigType], train_cfg: List[ConfigType], test_cfg: ConfigType, init_cfg: OptMultiConfig = None) -> None: super().__init__(init_cfg=init_cfg) assert num_classes == 1, 'Only support num_classes == 1' assert num_stages == len(stages) self.num_stages = num_stages # Be careful! Pretrained weights cannot be loaded when use # nn.ModuleList self.stages = ModuleList() for i in range(len(stages)): train_cfg_i = train_cfg[i] if train_cfg is not None else None stages[i].update(train_cfg=train_cfg_i) stages[i].update(test_cfg=test_cfg) self.stages.append(MODELS.build(stages[i])) self.train_cfg = train_cfg self.test_cfg = test_cfg def loss_by_feat(self): """loss_by_feat() is implemented in StageCascadeRPNHead.""" pass def predict_by_feat(self): """predict_by_feat() is implemented in StageCascadeRPNHead.""" pass def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection head on the features of the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: dict: A dictionary of loss components. 
""" outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, _, batch_img_metas = outputs featmap_sizes = [featmap.size()[-2:] for featmap in x] device = x[0].device anchor_list, valid_flag_list = self.stages[0].get_anchors( featmap_sizes, batch_img_metas, device=device) losses = dict() for i in range(self.num_stages): stage = self.stages[i] if stage.adapt_cfg['type'] == 'offset': offset_list = stage.anchor_offset(anchor_list, stage.anchor_strides, featmap_sizes) else: offset_list = None x, cls_score, bbox_pred = stage(x, offset_list) rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, bbox_pred, batch_gt_instances, batch_img_metas) stage_loss = stage.loss_by_feat(*rpn_loss_inputs) for name, value in stage_loss.items(): losses['s{}.{}'.format(i, name)] = value # refine boxes if i < self.num_stages - 1: anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, batch_img_metas) return losses def loss_and_predict( self, x: Tuple[Tensor], batch_data_samples: SampleList, proposal_cfg: Optional[ConfigDict] = None, ) -> Tuple[dict, InstanceList]: """Perform forward propagation of the head, then calculate loss and predictions from the features and data samples. Args: x (tuple[Tensor]): Features from FPN. batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. proposal_cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. Returns: tuple: the return value is a tuple contains: - losses: (dict[str, Tensor]): A dictionary of loss components. - predictions (list[:obj:`InstanceData`]): Detection results of each image after the post process. """ outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, _, batch_img_metas = outputs featmap_sizes = [featmap.size()[-2:] for featmap in x] device = x[0].device anchor_list, valid_flag_list = self.stages[0].get_anchors( featmap_sizes, batch_img_metas, device=device) losses = dict() for i in range(self.num_stages): stage = self.stages[i] if stage.adapt_cfg['type'] == 'offset': offset_list = stage.anchor_offset(anchor_list, stage.anchor_strides, featmap_sizes) else: offset_list = None x, cls_score, bbox_pred = stage(x, offset_list) rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, bbox_pred, batch_gt_instances, batch_img_metas) stage_loss = stage.loss_by_feat(*rpn_loss_inputs) for name, value in stage_loss.items(): losses['s{}.{}'.format(i, name)] = value # refine boxes if i < self.num_stages - 1: anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, batch_img_metas) predictions = self.stages[-1].predict_by_feat( anchor_list, cls_score, bbox_pred, batch_img_metas=batch_img_metas, cfg=proposal_cfg) return losses, predictions def predict(self, x: Tuple[Tensor], batch_data_samples: SampleList, rescale: bool = False) -> InstanceList: """Perform forward propagation of the detection head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. 
""" batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] featmap_sizes = [featmap.size()[-2:] for featmap in x] device = x[0].device anchor_list, _ = self.stages[0].get_anchors( featmap_sizes, batch_img_metas, device=device) for i in range(self.num_stages): stage = self.stages[i] if stage.adapt_cfg['type'] == 'offset': offset_list = stage.anchor_offset(anchor_list, stage.anchor_strides, featmap_sizes) else: offset_list = None x, cls_score, bbox_pred = stage(x, offset_list) if i < self.num_stages - 1: anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, batch_img_metas) predictions = self.stages[-1].predict_by_feat( anchor_list, cls_score, bbox_pred, batch_img_metas=batch_img_metas, rescale=rescale) return predictions
48,358
42.527453
79
py
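Between stages, refine_bboxes simply decodes each stage's regression output against the current anchors and hands the decoded boxes to the next stage as its new anchors. The decode is the usual delta-xywh transform; a minimal sketch is below (assuming unit means/stds and no aspect-ratio clamping, which are config details not shown here, and this is not the mmdet coder itself):

import torch

def delta2bbox(anchors, deltas, max_shape=None):
    # anchors, deltas: (N, 4); deltas are (dx, dy, dw, dh)
    ax = (anchors[:, 0] + anchors[:, 2]) * 0.5
    ay = (anchors[:, 1] + anchors[:, 3]) * 0.5
    aw = anchors[:, 2] - anchors[:, 0]
    ah = anchors[:, 3] - anchors[:, 1]
    cx = ax + deltas[:, 0] * aw            # shift the centre
    cy = ay + deltas[:, 1] * ah
    w = aw * deltas[:, 2].exp()            # rescale width and height
    h = ah * deltas[:, 3].exp()
    boxes = torch.stack(
        [cx - w * 0.5, cy - h * 0.5, cx + w * 0.5, cy + h * 0.5], dim=-1)
    if max_shape is not None:              # clip to the image, max_shape = (h, w)
        boxes[:, 0::2] = boxes[:, 0::2].clamp(0, max_shape[1])
        boxes[:, 1::2] = boxes[:, 1::2].clamp(0, max_shape[0])
    return boxes

# one anchor nudged right and grown slightly wider
print(delta2bbox(torch.tensor([[0., 0., 10., 10.]]),
                 torch.tensor([[0.1, 0., 0.2, 0.]])))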
ERD
ERD-main/mmdet/models/dense_heads/vfnet_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple, Union import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmcv.ops import DeformConv2d from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptInstanceList, RangeType, reduce_mean) from ..task_modules.prior_generators import MlvlPointGenerator from ..task_modules.samplers import PseudoSampler from ..utils import multi_apply from .atss_head import ATSSHead from .fcos_head import FCOSHead INF = 1e8 @MODELS.register_module() class VFNetHead(ATSSHead, FCOSHead): """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object Detector.<https://arxiv.org/abs/2008.13367>`_. The VFNet predicts IoU-aware classification scores which mix the object presence confidence and object localization accuracy as the detection score. It is built on the FCOS architecture and uses ATSS for defining positive/negative training examples. The VFNet is trained with Varifocal Loss and empolys star-shaped deformable convolution to extract features for a bbox. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple level points. center_sampling (bool): If true, use center sampling. Defaults to False. center_sample_radius (float): Radius of center sampling. Defaults to 1.5. sync_num_pos (bool): If true, synchronize the number of positive examples across GPUs. Defaults to True gradient_mul (float): The multiplier to gradients from bbox refinement and recognition. Defaults to 0.1. bbox_norm_type (str): The bbox normalization type, 'reg_denom' or 'stride'. Defaults to reg_denom loss_cls_fl (:obj:`ConfigDict` or dict): Config of focal loss. use_vfl (bool): If true, use varifocal loss for training. Defaults to True. loss_cls (:obj:`ConfigDict` or dict): Config of varifocal loss. loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss, GIoU Loss. loss_bbox (:obj:`ConfigDict` or dict): Config of localization refinement loss, GIoU Loss. norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and config norm layer. Defaults to norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). use_atss (bool): If true, use ATSS to define positive/negative examples. Defaults to True. anchor_generator (:obj:`ConfigDict` or dict): Config of anchor generator for ATSS. init_cfg (:obj:`ConfigDict` or dict or list[dict] or list[:obj:`ConfigDict`]): Initialization config dict. 
Example: >>> self = VFNetHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats) >>> assert len(cls_score) == len(self.scales) """ # noqa: E501 def __init__(self, num_classes: int, in_channels: int, regress_ranges: RangeType = ((-1, 64), (64, 128), (128, 256), (256, 512), (512, INF)), center_sampling: bool = False, center_sample_radius: float = 1.5, sync_num_pos: bool = True, gradient_mul: float = 0.1, bbox_norm_type: str = 'reg_denom', loss_cls_fl: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), use_vfl: bool = True, loss_cls: ConfigType = dict( type='VarifocalLoss', use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, loss_weight=1.0), loss_bbox: ConfigType = dict( type='GIoULoss', loss_weight=1.5), loss_bbox_refine: ConfigType = dict( type='GIoULoss', loss_weight=2.0), norm_cfg: ConfigType = dict( type='GN', num_groups=32, requires_grad=True), use_atss: bool = True, reg_decoded_bbox: bool = True, anchor_generator: ConfigType = dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, center_offset=0.0, strides=[8, 16, 32, 64, 128]), init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='vfnet_cls', std=0.01, bias_prob=0.01)), **kwargs) -> None: # dcn base offsets, adapted from reppoints_head.py self.num_dconv_points = 9 self.dcn_kernel = int(np.sqrt(self.num_dconv_points)) self.dcn_pad = int((self.dcn_kernel - 1) / 2) dcn_base = np.arange(-self.dcn_pad, self.dcn_pad + 1).astype(np.float64) dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) dcn_base_x = np.tile(dcn_base, self.dcn_kernel) dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( (-1)) self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) super(FCOSHead, self).__init__( num_classes=num_classes, in_channels=in_channels, norm_cfg=norm_cfg, init_cfg=init_cfg, **kwargs) self.regress_ranges = regress_ranges self.reg_denoms = [ regress_range[-1] for regress_range in regress_ranges ] self.reg_denoms[-1] = self.reg_denoms[-2] * 2 self.center_sampling = center_sampling self.center_sample_radius = center_sample_radius self.sync_num_pos = sync_num_pos self.bbox_norm_type = bbox_norm_type self.gradient_mul = gradient_mul self.use_vfl = use_vfl if self.use_vfl: self.loss_cls = MODELS.build(loss_cls) else: self.loss_cls = MODELS.build(loss_cls_fl) self.loss_bbox = MODELS.build(loss_bbox) self.loss_bbox_refine = MODELS.build(loss_bbox_refine) # for getting ATSS targets self.use_atss = use_atss self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.anchor_center_offset = anchor_generator['center_offset'] self.num_base_priors = self.prior_generator.num_base_priors[0] if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) if self.train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler() # only be used in `get_atss_targets` when `use_atss` is True self.atss_prior_generator = TASK_UTILS.build(anchor_generator) self.fcos_prior_generator = MlvlPointGenerator( anchor_generator['strides'], self.anchor_center_offset if self.use_atss else 0.5) # In order to reuse the `get_bboxes` in `BaseDenseHead. # Only be used in testing phase. 
self.prior_generator = self.fcos_prior_generator def _init_layers(self) -> None: """Initialize layers of the head.""" super(FCOSHead, self)._init_cls_convs() super(FCOSHead, self)._init_reg_convs() self.relu = nn.ReLU() self.vfnet_reg_conv = ConvModule( self.feat_channels, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias) self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) self.vfnet_reg_refine_dconv = DeformConv2d( self.feat_channels, self.feat_channels, self.dcn_kernel, 1, padding=self.dcn_pad) self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1) self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides]) self.vfnet_cls_dconv = DeformConv2d( self.feat_channels, self.feat_channels, self.dcn_kernel, 1, padding=self.dcn_pad) self.vfnet_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: - cls_scores (list[Tensor]): Box iou-aware scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. - bbox_preds (list[Tensor]): Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. - bbox_preds_refine (list[Tensor]): Refined Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. """ return multi_apply(self.forward_single, x, self.scales, self.scales_refine, self.strides, self.reg_denoms) def forward_single(self, x: Tensor, scale: Scale, scale_refine: Scale, stride: int, reg_denom: int) -> tuple: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the refined bbox prediction. stride (int): The corresponding stride for feature maps, used to normalize the bbox prediction when bbox_norm_type = 'stride'. reg_denom (int): The corresponding regression range for feature maps, only used to normalize the bbox prediction when bbox_norm_type = 'reg_denom'. Returns: tuple: iou-aware cls scores for each box, bbox predictions and refined bbox predictions of input feature maps. 
""" cls_feat = x reg_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) # predict the bbox_pred of different level reg_feat_init = self.vfnet_reg_conv(reg_feat) if self.bbox_norm_type == 'reg_denom': bbox_pred = scale( self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom elif self.bbox_norm_type == 'stride': bbox_pred = scale( self.vfnet_reg(reg_feat_init)).float().exp() * stride else: raise NotImplementedError # compute star deformable convolution offsets # converting dcn_offset to reg_feat.dtype thus VFNet can be # trained with FP16 dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul, stride).to(reg_feat.dtype) # refine the bbox_pred reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset)) bbox_pred_refine = scale_refine( self.vfnet_reg_refine(reg_feat)).float().exp() bbox_pred_refine = bbox_pred_refine * bbox_pred.detach() # predict the iou-aware cls score cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset)) cls_score = self.vfnet_cls(cls_feat) if self.training: return cls_score, bbox_pred, bbox_pred_refine else: return cls_score, bbox_pred_refine def star_dcn_offset(self, bbox_pred: Tensor, gradient_mul: float, stride: int) -> Tensor: """Compute the star deformable conv offsets. Args: bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b). gradient_mul (float): Gradient multiplier. stride (int): The corresponding stride for feature maps, used to project the bbox onto the feature map. Returns: Tensor: The offsets for deformable convolution. """ dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred) bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \ gradient_mul * bbox_pred # map to the feature map scale bbox_pred_grad_mul = bbox_pred_grad_mul / stride N, C, H, W = bbox_pred.size() x1 = bbox_pred_grad_mul[:, 0, :, :] y1 = bbox_pred_grad_mul[:, 1, :, :] x2 = bbox_pred_grad_mul[:, 2, :, :] y2 = bbox_pred_grad_mul[:, 3, :, :] bbox_pred_grad_mul_offset = bbox_pred.new_zeros( N, 2 * self.num_dconv_points, H, W) bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1 bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1 bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1 bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1 bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2 bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1 bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2 bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2 bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1 bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2 bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2 bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2 dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset return dcn_offset def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], bbox_preds_refine: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Compute loss of the head. Args: cls_scores (list[Tensor]): Box iou-aware scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. bbox_preds_refine (list[Tensor]): Refined Box offsets for each scale level, each is a 4D-tensor, the channel number is num_points * 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. 
It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.fcos_prior_generator.grid_priors( featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) labels, label_weights, bbox_targets, bbox_weights = self.get_targets( cls_scores, all_level_points, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) num_imgs = cls_scores[0].size(0) # flatten cls_scores, bbox_preds and bbox_preds_refine flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels).contiguous() for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() for bbox_pred in bbox_preds ] flatten_bbox_preds_refine = [ bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() for bbox_pred_refine in bbox_preds_refine ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = torch.where( ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0] num_pos = len(pos_inds) pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds] pos_labels = flatten_labels[pos_inds] # sync num_pos across all gpus if self.sync_num_pos: num_pos_avg_per_gpu = reduce_mean( pos_inds.new_tensor(num_pos).float()).item() num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0) else: num_pos_avg_per_gpu = num_pos pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = self.bbox_coder.decode( pos_points, pos_bbox_preds) pos_decoded_target_preds = self.bbox_coder.decode( pos_points, pos_bbox_targets) iou_targets_ini = bbox_overlaps( pos_decoded_bbox_preds, pos_decoded_target_preds.detach(), is_aligned=True).clamp(min=1e-6) bbox_weights_ini = iou_targets_ini.clone().detach() bbox_avg_factor_ini = reduce_mean( bbox_weights_ini.sum()).clamp_(min=1).item() pos_decoded_bbox_preds_refine = \ self.bbox_coder.decode(pos_points, pos_bbox_preds_refine) iou_targets_rf = bbox_overlaps( pos_decoded_bbox_preds_refine, pos_decoded_target_preds.detach(), is_aligned=True).clamp(min=1e-6) bbox_weights_rf = iou_targets_rf.clone().detach() bbox_avg_factor_rf = reduce_mean( bbox_weights_rf.sum()).clamp_(min=1).item() if num_pos > 0: loss_bbox = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds.detach(), weight=bbox_weights_ini, avg_factor=bbox_avg_factor_ini) loss_bbox_refine = self.loss_bbox_refine( pos_decoded_bbox_preds_refine, pos_decoded_target_preds.detach(), weight=bbox_weights_rf, avg_factor=bbox_avg_factor_rf) # build IoU-aware cls_score targets if self.use_vfl: pos_ious = iou_targets_rf.clone().detach() 
cls_iou_targets = torch.zeros_like(flatten_cls_scores) cls_iou_targets[pos_inds, pos_labels] = pos_ious else: loss_bbox = pos_bbox_preds.sum() * 0 loss_bbox_refine = pos_bbox_preds_refine.sum() * 0 if self.use_vfl: cls_iou_targets = torch.zeros_like(flatten_cls_scores) if self.use_vfl: loss_cls = self.loss_cls( flatten_cls_scores, cls_iou_targets, avg_factor=num_pos_avg_per_gpu) else: loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, weight=label_weights, avg_factor=num_pos_avg_per_gpu) return dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_bbox_rf=loss_bbox_refine) def get_targets( self, cls_scores: List[Tensor], mlvl_points: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> tuple: """A wrapper for computing ATSS and FCOS targets for points in multiple images. Args: cls_scores (list[Tensor]): Box iou-aware scores for each scale level with shape (N, num_points * num_classes, H, W). mlvl_points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights (Tensor/None): Label weights of all levels. - bbox_targets_list (list[Tensor]): Regression targets of each level, (l, t, r, b). - bbox_weights (Tensor/None): Bbox weights of all levels. """ if self.use_atss: return self.get_atss_targets(cls_scores, mlvl_points, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) else: self.norm_on_bbox = False return self.get_fcos_targets(mlvl_points, batch_gt_instances) def _get_targets_single(self, *args, **kwargs): """Avoid ambiguity in multiple inheritance.""" if self.use_atss: return ATSSHead._get_targets_single(self, *args, **kwargs) else: return FCOSHead._get_targets_single(self, *args, **kwargs) def get_fcos_targets(self, points: List[Tensor], batch_gt_instances: InstanceList) -> tuple: """Compute FCOS regression and classification targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple: - labels (list[Tensor]): Labels of each level. - label_weights: None, to be compatible with ATSS targets. - bbox_targets (list[Tensor]): BBox targets of each level. - bbox_weights: None, to be compatible with ATSS targets. """ labels, bbox_targets = FCOSHead.get_targets(self, points, batch_gt_instances) label_weights = None bbox_weights = None return labels, label_weights, bbox_targets, bbox_weights def get_anchors(self, featmap_sizes: List[Tuple], batch_img_metas: List[dict], device: str = 'cuda') -> tuple: """Get anchors according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. batch_img_metas (list[dict]): Image meta info. device (str): Device for returned tensors Returns: tuple: - anchor_list (list[Tensor]): Anchors of each image. - valid_flag_list (list[Tensor]): Valid flags of each image. 
""" num_imgs = len(batch_img_metas) # since feature map sizes of all images are the same, we only compute # anchors for one time multi_level_anchors = self.atss_prior_generator.grid_priors( featmap_sizes, device=device) anchor_list = [multi_level_anchors for _ in range(num_imgs)] # for each image, we compute valid flags of multi level anchors valid_flag_list = [] for img_id, img_meta in enumerate(batch_img_metas): multi_level_flags = self.atss_prior_generator.valid_flags( featmap_sizes, img_meta['pad_shape'], device=device) valid_flag_list.append(multi_level_flags) return anchor_list, valid_flag_list def get_atss_targets( self, cls_scores: List[Tensor], mlvl_points: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> tuple: """A wrapper for computing ATSS targets for points in multiple images. Args: cls_scores (list[Tensor]): Box iou-aware scores for each scale level with shape (N, num_points * num_classes, H, W). mlvl_points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights (Tensor): Label weights of all levels. - bbox_targets_list (list[Tensor]): Regression targets of each level, (l, t, r, b). - bbox_weights (Tensor): Bbox weights of all levels. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len( featmap_sizes ) == self.atss_prior_generator.num_levels == \ self.fcos_prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = ATSSHead.get_targets( self, anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=True) (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets bbox_targets_list = [ bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list ] num_imgs = len(batch_img_metas) # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format bbox_targets_list = self.transform_bbox_targets( bbox_targets_list, mlvl_points, num_imgs) labels_list = [labels.reshape(-1) for labels in labels_list] label_weights_list = [ label_weights.reshape(-1) for label_weights in label_weights_list ] bbox_weights_list = [ bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list ] label_weights = torch.cat(label_weights_list) bbox_weights = torch.cat(bbox_weights_list) return labels_list, label_weights, bbox_targets_list, bbox_weights def transform_bbox_targets(self, decoded_bboxes: List[Tensor], mlvl_points: List[Tensor], num_imgs: int) -> List[Tensor]: """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format. Args: decoded_bboxes (list[Tensor]): Regression targets of each level, in the form of (x1, y1, x2, y2). mlvl_points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). num_imgs (int): the number of images in a batch. 
Returns: bbox_targets (list[Tensor]): Regression targets of each level in the form of (l, t, r, b). """ # TODO: Re-implemented in Class PointCoder assert len(decoded_bboxes) == len(mlvl_points) num_levels = len(decoded_bboxes) mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points] bbox_targets = [] for i in range(num_levels): bbox_target = self.bbox_coder.encode(mlvl_points[i], decoded_bboxes[i]) bbox_targets.append(bbox_target) return bbox_targets def _load_from_state_dict(self, state_dict: dict, prefix: str, local_metadata: dict, strict: bool, missing_keys: Union[List[str], str], unexpected_keys: Union[List[str], str], error_msgs: Union[List[str], str]) -> None: """Override the method in the parent class to avoid changing para's name.""" pass
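# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this repo): the point-based (l, t, r, b)
# encoding that `transform_bbox_targets` above obtains from the head's
# bbox_coder. Given prior points and boxes in (x1, y1, x2, y2) format, each
# target is the distance from the point to the four box sides. The helper
# name `encode_ltrb` is invented for this example only.
import torch


def encode_ltrb(points: torch.Tensor, bboxes: torch.Tensor) -> torch.Tensor:
    """points: (N, 2) as (x, y); bboxes: (N, 4) as (x1, y1, x2, y2)."""
    left = points[:, 0] - bboxes[:, 0]
    top = points[:, 1] - bboxes[:, 1]
    right = bboxes[:, 2] - points[:, 0]
    bottom = bboxes[:, 3] - points[:, 1]
    return torch.stack([left, top, right, bottom], dim=-1)


if __name__ == '__main__':
    pts = torch.tensor([[50., 60.]])
    boxes = torch.tensor([[30., 40., 90., 100.]])
    print(encode_ltrb(pts, boxes))  # tensor([[20., 20., 40., 40.]])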
30,913
41.757953
81
py
ERD
ERD-main/mmdet/models/dense_heads/centernet_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch import torch.nn as nn from mmcv.ops import batched_nms from mmengine.config import ConfigDict from mmengine.model import bias_init_with_prob, normal_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, OptMultiConfig) from ..utils import (gaussian_radius, gen_gaussian_target, get_local_maximum, get_topk_from_heatmap, multi_apply, transpose_and_gather_feat) from .base_dense_head import BaseDenseHead @MODELS.register_module() class CenterNetHead(BaseDenseHead): """Objects as Points Head. CenterHead use center_point to indicate object's position. Paper link <https://arxiv.org/abs/1904.07850> Args: in_channels (int): Number of channel in the input feature map. feat_channels (int): Number of channel in the intermediate feature map. num_classes (int): Number of categories excluding the background category. loss_center_heatmap (:obj:`ConfigDict` or dict): Config of center heatmap loss. Defaults to dict(type='GaussianFocalLoss', loss_weight=1.0) loss_wh (:obj:`ConfigDict` or dict): Config of wh loss. Defaults to dict(type='L1Loss', loss_weight=0.1). loss_offset (:obj:`ConfigDict` or dict): Config of offset loss. Defaults to dict(type='L1Loss', loss_weight=1.0). train_cfg (:obj:`ConfigDict` or dict, optional): Training config. Useless in CenterNet, but we keep this variable for SingleStageDetector. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of CenterNet. init_cfg (:obj:`ConfigDict` or dict or list[dict] or list[:obj:`ConfigDict`], optional): Initialization config dict. """ def __init__(self, in_channels: int, feat_channels: int, num_classes: int, loss_center_heatmap: ConfigType = dict( type='GaussianFocalLoss', loss_weight=1.0), loss_wh: ConfigType = dict(type='L1Loss', loss_weight=0.1), loss_offset: ConfigType = dict( type='L1Loss', loss_weight=1.0), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: OptMultiConfig = None) -> None: super().__init__(init_cfg=init_cfg) self.num_classes = num_classes self.heatmap_head = self._build_head(in_channels, feat_channels, num_classes) self.wh_head = self._build_head(in_channels, feat_channels, 2) self.offset_head = self._build_head(in_channels, feat_channels, 2) self.loss_center_heatmap = MODELS.build(loss_center_heatmap) self.loss_wh = MODELS.build(loss_wh) self.loss_offset = MODELS.build(loss_offset) self.train_cfg = train_cfg self.test_cfg = test_cfg self.fp16_enabled = False def _build_head(self, in_channels: int, feat_channels: int, out_channels: int) -> nn.Sequential: """Build head for each branch.""" layer = nn.Sequential( nn.Conv2d(in_channels, feat_channels, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(feat_channels, out_channels, kernel_size=1)) return layer def init_weights(self) -> None: """Initialize weights of the head.""" bias_init = bias_init_with_prob(0.1) self.heatmap_head[-1].bias.data.fill_(bias_init) for head in [self.wh_head, self.offset_head]: for m in head.modules(): if isinstance(m, nn.Conv2d): normal_init(m, std=0.001) def forward(self, x: Tuple[Tensor, ...]) -> Tuple[List[Tensor]]: """Forward features. Notice CenterNet head does not use FPN. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. 
Returns: center_heatmap_preds (list[Tensor]): center predict heatmaps for all levels, the channels number is num_classes. wh_preds (list[Tensor]): wh predicts for all levels, the channels number is 2. offset_preds (list[Tensor]): offset predicts for all levels, the channels number is 2. """ return multi_apply(self.forward_single, x) def forward_single(self, x: Tensor) -> Tuple[Tensor, ...]: """Forward feature of a single level. Args: x (Tensor): Feature of a single level. Returns: center_heatmap_pred (Tensor): center predict heatmaps, the channels number is num_classes. wh_pred (Tensor): wh predicts, the channels number is 2. offset_pred (Tensor): offset predicts, the channels number is 2. """ center_heatmap_pred = self.heatmap_head(x).sigmoid() wh_pred = self.wh_head(x) offset_pred = self.offset_head(x) return center_heatmap_pred, wh_pred, offset_pred def loss_by_feat( self, center_heatmap_preds: List[Tensor], wh_preds: List[Tensor], offset_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Compute losses of the head. Args: center_heatmap_preds (list[Tensor]): center predict heatmaps for all levels with shape (B, num_classes, H, W). wh_preds (list[Tensor]): wh predicts for all levels with shape (B, 2, H, W). offset_preds (list[Tensor]): offset predicts for all levels with shape (B, 2, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: which has components below: - loss_center_heatmap (Tensor): loss of center heatmap. - loss_wh (Tensor): loss of hw heatmap - loss_offset (Tensor): loss of offset heatmap. """ assert len(center_heatmap_preds) == len(wh_preds) == len( offset_preds) == 1 center_heatmap_pred = center_heatmap_preds[0] wh_pred = wh_preds[0] offset_pred = offset_preds[0] gt_bboxes = [ gt_instances.bboxes for gt_instances in batch_gt_instances ] gt_labels = [ gt_instances.labels for gt_instances in batch_gt_instances ] img_shape = batch_img_metas[0]['batch_input_shape'] target_result, avg_factor = self.get_targets(gt_bboxes, gt_labels, center_heatmap_pred.shape, img_shape) center_heatmap_target = target_result['center_heatmap_target'] wh_target = target_result['wh_target'] offset_target = target_result['offset_target'] wh_offset_target_weight = target_result['wh_offset_target_weight'] # Since the channel of wh_target and offset_target is 2, the avg_factor # of loss_center_heatmap is always 1/2 of loss_wh and loss_offset. loss_center_heatmap = self.loss_center_heatmap( center_heatmap_pred, center_heatmap_target, avg_factor=avg_factor) loss_wh = self.loss_wh( wh_pred, wh_target, wh_offset_target_weight, avg_factor=avg_factor * 2) loss_offset = self.loss_offset( offset_pred, offset_target, wh_offset_target_weight, avg_factor=avg_factor * 2) return dict( loss_center_heatmap=loss_center_heatmap, loss_wh=loss_wh, loss_offset=loss_offset) def get_targets(self, gt_bboxes: List[Tensor], gt_labels: List[Tensor], feat_shape: tuple, img_shape: tuple) -> Tuple[dict, int]: """Compute regression and classification targets in multiple images. 
Args: gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box. feat_shape (tuple): feature map shape with value [B, _, H, W] img_shape (tuple): image shape. Returns: tuple[dict, float]: The float value is mean avg_factor, the dict has components below: - center_heatmap_target (Tensor): targets of center heatmap, \ shape (B, num_classes, H, W). - wh_target (Tensor): targets of wh predict, shape \ (B, 2, H, W). - offset_target (Tensor): targets of offset predict, shape \ (B, 2, H, W). - wh_offset_target_weight (Tensor): weights of wh and offset \ predict, shape (B, 2, H, W). """ img_h, img_w = img_shape[:2] bs, _, feat_h, feat_w = feat_shape width_ratio = float(feat_w / img_w) height_ratio = float(feat_h / img_h) center_heatmap_target = gt_bboxes[-1].new_zeros( [bs, self.num_classes, feat_h, feat_w]) wh_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w]) offset_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w]) wh_offset_target_weight = gt_bboxes[-1].new_zeros( [bs, 2, feat_h, feat_w]) for batch_id in range(bs): gt_bbox = gt_bboxes[batch_id] gt_label = gt_labels[batch_id] center_x = (gt_bbox[:, [0]] + gt_bbox[:, [2]]) * width_ratio / 2 center_y = (gt_bbox[:, [1]] + gt_bbox[:, [3]]) * height_ratio / 2 gt_centers = torch.cat((center_x, center_y), dim=1) for j, ct in enumerate(gt_centers): ctx_int, cty_int = ct.int() ctx, cty = ct scale_box_h = (gt_bbox[j][3] - gt_bbox[j][1]) * height_ratio scale_box_w = (gt_bbox[j][2] - gt_bbox[j][0]) * width_ratio radius = gaussian_radius([scale_box_h, scale_box_w], min_overlap=0.3) radius = max(0, int(radius)) ind = gt_label[j] gen_gaussian_target(center_heatmap_target[batch_id, ind], [ctx_int, cty_int], radius) wh_target[batch_id, 0, cty_int, ctx_int] = scale_box_w wh_target[batch_id, 1, cty_int, ctx_int] = scale_box_h offset_target[batch_id, 0, cty_int, ctx_int] = ctx - ctx_int offset_target[batch_id, 1, cty_int, ctx_int] = cty - cty_int wh_offset_target_weight[batch_id, :, cty_int, ctx_int] = 1 avg_factor = max(1, center_heatmap_target.eq(1).sum()) target_result = dict( center_heatmap_target=center_heatmap_target, wh_target=wh_target, offset_target=offset_target, wh_offset_target_weight=wh_offset_target_weight) return target_result, avg_factor def predict_by_feat(self, center_heatmap_preds: List[Tensor], wh_preds: List[Tensor], offset_preds: List[Tensor], batch_img_metas: Optional[List[dict]] = None, rescale: bool = True, with_nms: bool = False) -> InstanceList: """Transform network output for a batch into bbox predictions. Args: center_heatmap_preds (list[Tensor]): Center predict heatmaps for all levels with shape (B, num_classes, H, W). wh_preds (list[Tensor]): WH predicts for all levels with shape (B, 2, H, W). offset_preds (list[Tensor]): Offset predicts for all levels with shape (B, 2, H, W). batch_img_metas (list[dict], optional): Batch image meta info. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to True. with_nms (bool): If True, do nms before return boxes. Defaults to False. Returns: list[:obj:`InstanceData`]: Instance segmentation results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" assert len(center_heatmap_preds) == len(wh_preds) == len( offset_preds) == 1 result_list = [] for img_id in range(len(batch_img_metas)): result_list.append( self._predict_by_feat_single( center_heatmap_preds[0][img_id:img_id + 1, ...], wh_preds[0][img_id:img_id + 1, ...], offset_preds[0][img_id:img_id + 1, ...], batch_img_metas[img_id], rescale=rescale, with_nms=with_nms)) return result_list def _predict_by_feat_single(self, center_heatmap_pred: Tensor, wh_pred: Tensor, offset_pred: Tensor, img_meta: dict, rescale: bool = True, with_nms: bool = False) -> InstanceData: """Transform outputs of a single image into bbox results. Args: center_heatmap_pred (Tensor): Center heatmap for current level with shape (1, num_classes, H, W). wh_pred (Tensor): WH heatmap for current level with shape (1, num_classes, H, W). offset_pred (Tensor): Offset for current level with shape (1, corner_offset_channels, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Defaults to True. with_nms (bool): If True, do nms before return boxes. Defaults to False. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ batch_det_bboxes, batch_labels = self._decode_heatmap( center_heatmap_pred, wh_pred, offset_pred, img_meta['batch_input_shape'], k=self.test_cfg.topk, kernel=self.test_cfg.local_maximum_kernel) det_bboxes = batch_det_bboxes.view([-1, 5]) det_labels = batch_labels.view(-1) batch_border = det_bboxes.new_tensor(img_meta['border'])[..., [2, 0, 2, 0]] det_bboxes[..., :4] -= batch_border if rescale and 'scale_factor' in img_meta: det_bboxes[..., :4] /= det_bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) if with_nms: det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels, self.test_cfg) results = InstanceData() results.bboxes = det_bboxes[..., :4] results.scores = det_bboxes[..., 4] results.labels = det_labels return results def _decode_heatmap(self, center_heatmap_pred: Tensor, wh_pred: Tensor, offset_pred: Tensor, img_shape: tuple, k: int = 100, kernel: int = 3) -> Tuple[Tensor, Tensor]: """Transform outputs into detections raw bbox prediction. Args: center_heatmap_pred (Tensor): center predict heatmap, shape (B, num_classes, H, W). wh_pred (Tensor): wh predict, shape (B, 2, H, W). offset_pred (Tensor): offset predict, shape (B, 2, H, W). img_shape (tuple): image shape in hw format. k (int): Get top k center keypoints from heatmap. Defaults to 100. kernel (int): Max pooling kernel for extract local maximum pixels. Defaults to 3. 
Returns: tuple[Tensor]: Decoded output of CenterNetHead, containing the following Tensors: - batch_bboxes (Tensor): Coords of each box with shape (B, k, 5) - batch_topk_labels (Tensor): Categories of each box with \ shape (B, k) """ height, width = center_heatmap_pred.shape[2:] inp_h, inp_w = img_shape center_heatmap_pred = get_local_maximum( center_heatmap_pred, kernel=kernel) *batch_dets, topk_ys, topk_xs = get_topk_from_heatmap( center_heatmap_pred, k=k) batch_scores, batch_index, batch_topk_labels = batch_dets wh = transpose_and_gather_feat(wh_pred, batch_index) offset = transpose_and_gather_feat(offset_pred, batch_index) topk_xs = topk_xs + offset[..., 0] topk_ys = topk_ys + offset[..., 1] tl_x = (topk_xs - wh[..., 0] / 2) * (inp_w / width) tl_y = (topk_ys - wh[..., 1] / 2) * (inp_h / height) br_x = (topk_xs + wh[..., 0] / 2) * (inp_w / width) br_y = (topk_ys + wh[..., 1] / 2) * (inp_h / height) batch_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=2) batch_bboxes = torch.cat((batch_bboxes, batch_scores[..., None]), dim=-1) return batch_bboxes, batch_topk_labels def _bboxes_nms(self, bboxes: Tensor, labels: Tensor, cfg: ConfigDict) -> Tuple[Tensor, Tensor]: """bboxes nms.""" if labels.numel() > 0: max_num = cfg.max_per_img bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1].contiguous(), labels, cfg.nms) if max_num > 0: bboxes = bboxes[:max_num] labels = labels[keep][:max_num] return bboxes, labels
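# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this repo): how a single ground-truth box
# turns into CenterNet targets, mirroring the logic of `get_targets` above.
# The box center is mapped to feature-map coordinates, its integer part
# indexes the heatmap peak (where a Gaussian is splatted in the real code,
# omitted here), and the fractional remainder becomes the offset target.
# All names below are invented for this standalone example.
def center_targets_for_box(bbox_xyxy, img_hw, feat_hw):
    img_h, img_w = img_hw
    feat_h, feat_w = feat_hw
    w_ratio, h_ratio = feat_w / img_w, feat_h / img_h
    x1, y1, x2, y2 = bbox_xyxy
    ctx = (x1 + x2) / 2 * w_ratio
    cty = (y1 + y2) / 2 * h_ratio
    ctx_int, cty_int = int(ctx), int(cty)
    wh_target = ((x2 - x1) * w_ratio, (y2 - y1) * h_ratio)
    offset_target = (ctx - ctx_int, cty - cty_int)
    return (ctx_int, cty_int), wh_target, offset_target


if __name__ == '__main__':
    # 640x640 input with a 160x160 feature map (stride 4).
    peak, wh, offset = center_targets_for_box((100., 120., 300., 360.),
                                              (640, 640), (160, 160))
    print(peak, wh, offset)  # (50, 60) (50.0, 60.0) (0.0, 0.0)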
19,870
43.354911
79
py
ERD
ERD-main/mmdet/models/dense_heads/rtmdet_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule, Scale, is_norm from mmengine.model import bias_init_with_prob, constant_init, normal_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures.bbox import distance2bbox from mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean from ..layers.transformer import inverse_sigmoid from ..task_modules import anchor_inside_flags from ..utils import (images_to_levels, multi_apply, sigmoid_geometric_mean, unmap) from .atss_head import ATSSHead @MODELS.register_module() class RTMDetHead(ATSSHead): """Detection Head of RTMDet. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. with_objectness (bool): Whether to add an objectness branch. Defaults to True. act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. Default: dict(type='ReLU') """ def __init__(self, num_classes: int, in_channels: int, with_objectness: bool = True, act_cfg: ConfigType = dict(type='ReLU'), **kwargs) -> None: self.act_cfg = act_cfg self.with_objectness = with_objectness super().__init__(num_classes, in_channels, **kwargs) if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) def _init_layers(self): """Initialize layers of the head.""" self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) pred_pad_size = self.pred_kernel_size // 2 self.rtm_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, self.pred_kernel_size, padding=pred_pad_size) self.rtm_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, self.pred_kernel_size, padding=pred_pad_size) if self.with_objectness: self.rtm_obj = nn.Conv2d( self.feat_channels, 1, self.pred_kernel_size, padding=pred_pad_size) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def init_weights(self) -> None: """Initialize weights of the head.""" for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) bias_cls = bias_init_with_prob(0.01) normal_init(self.rtm_cls, std=0.01, bias=bias_cls) normal_init(self.rtm_reg, std=0.01) if self.with_objectness: normal_init(self.rtm_obj, std=0.01, bias=bias_cls) def forward(self, feats: Tuple[Tensor, ...]) -> tuple: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. 
""" cls_scores = [] bbox_preds = [] for idx, (x, scale, stride) in enumerate( zip(feats, self.scales, self.prior_generator.strides)): cls_feat = x reg_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.rtm_cls(cls_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) if self.with_objectness: objectness = self.rtm_obj(reg_feat) cls_score = inverse_sigmoid( sigmoid_geometric_mean(cls_score, objectness)) reg_dist = scale(self.rtm_reg(reg_feat).exp()).float() * stride[0] cls_scores.append(cls_score) bbox_preds.append(reg_dist) return tuple(cls_scores), tuple(bbox_preds) def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, assign_metrics: Tensor, stride: List[int]): """Compute loss of a single scale level. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Decoded bboxes for each scale level with shape (N, num_anchors * 4, H, W). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors). bbox_targets (Tensor): BBox regression targets of each anchor with shape (N, num_total_anchors, 4). assign_metrics (Tensor): Assign metrics with shape (N, num_total_anchors). stride (List[int]): Downsample stride of the feature map. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' cls_score = cls_score.permute(0, 2, 3, 1).reshape( -1, self.cls_out_channels).contiguous() bbox_pred = bbox_pred.reshape(-1, 4) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) assign_metrics = assign_metrics.reshape(-1) label_weights = label_weights.reshape(-1) targets = (labels, assign_metrics) loss_cls = self.loss_cls( cls_score, targets, label_weights, avg_factor=1.0) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_decode_bbox_pred = pos_bbox_pred pos_decode_bbox_targets = pos_bbox_targets # regression loss pos_bbox_weight = assign_metrics[pos_inds] loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=pos_bbox_weight, avg_factor=1.0) else: loss_bbox = bbox_pred.sum() * 0 pos_bbox_weight = bbox_targets.new_tensor(0.) return loss_cls, loss_bbox, assign_metrics.sum(), pos_bbox_weight.sum() def loss_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Decoded box for each scale level with shape (N, num_anchors * 4, H, W) in [tl_x, tl_y, br_x, br_y] format. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. 
Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = len(batch_img_metas) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) flatten_cls_scores = torch.cat([ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_score in cls_scores ], 1) decoded_bboxes = [] for anchor, bbox_pred in zip(anchor_list[0], bbox_preds): anchor = anchor.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) bbox_pred = distance2bbox(anchor, bbox_pred) decoded_bboxes.append(bbox_pred) flatten_bboxes = torch.cat(decoded_bboxes, 1) cls_reg_targets = self.get_targets( flatten_cls_scores, flatten_bboxes, anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (anchor_list, labels_list, label_weights_list, bbox_targets_list, assign_metrics_list, sampling_results_list) = cls_reg_targets losses_cls, losses_bbox,\ cls_avg_factors, bbox_avg_factors = multi_apply( self.loss_by_feat_single, cls_scores, decoded_bboxes, labels_list, label_weights_list, bbox_targets_list, assign_metrics_list, self.prior_generator.strides) cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item() losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls)) bbox_avg_factor = reduce_mean( sum(bbox_avg_factors)).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) def get_targets(self, cls_scores: Tensor, bbox_preds: Tensor, anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs=True): """Compute regression and classification targets for anchors in multiple images. Args: cls_scores (Tensor): Classification predictions of images, a 3D-Tensor with shape [num_imgs, num_priors, num_classes]. bbox_preds (Tensor): Decoded bboxes predictions of one image, a 3D-Tensor with shape [num_imgs, num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: a tuple containing learning targets. - anchors_list (list[list[Tensor]]): Anchors of each level. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. 
- bbox_targets_list (list[Tensor]): BBox targets of each level. - assign_metrics_list (list[Tensor]): alignment metrics of each level. """ num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs # anchor_list: list(b * [-1, 4]) (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_assign_metrics, sampling_results_list) = multi_apply( self._get_targets_single, cls_scores.detach(), bbox_preds.detach(), anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) assign_metrics_list = images_to_levels(all_assign_metrics, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, assign_metrics_list, sampling_results_list) def _get_targets_single(self, cls_scores: Tensor, bbox_preds: Tensor, flat_anchors: Tensor, valid_flags: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs=True): """Compute regression, classification targets for anchors in a single image. Args: cls_scores (list(Tensor)): Box scores for each image. bbox_preds (list(Tensor)): Box energies / deltas for each image. flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: N is the number of total anchors in the image. - anchors (Tensor): All anchors in the image with shape (N, 4). - labels (Tensor): Labels of all anchors in the image with shape (N,). - label_weights (Tensor): Label weights of all anchor in the image with shape (N,). - bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). - norm_alignment_metrics (Tensor): Normalized alignment metrics of all priors in the image with shape (N,). 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] pred_instances = InstanceData( scores=cls_scores[inside_flags, :], bboxes=bbox_preds[inside_flags, :], priors=anchors) assign_result = self.assigner.assign(pred_instances, gt_instances, gt_instances_ignore) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) assign_metrics = anchors.new_zeros( num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: # point-based pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 class_assigned_gt_inds = torch.unique( sampling_result.pos_assigned_gt_inds) for gt_inds in class_assigned_gt_inds: gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds == gt_inds] assign_metrics[gt_class_inds] = assign_result.max_overlaps[ gt_class_inds] # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) assign_metrics = unmap(assign_metrics, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, assign_metrics, sampling_result) def get_anchors(self, featmap_sizes: List[tuple], batch_img_metas: List[dict], device: Union[torch.device, str] = 'cuda') \ -> Tuple[List[List[Tensor]], List[List[Tensor]]]: """Get anchors according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. batch_img_metas (list[dict]): Image meta info. device (torch.device or str): Device for returned tensors. Defaults to cuda. Returns: tuple: - anchor_list (list[list[Tensor]]): Anchors of each image. - valid_flag_list (list[list[Tensor]]): Valid flags of each image. """ num_imgs = len(batch_img_metas) # since feature map sizes of all images are the same, we only compute # anchors for one time multi_level_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device, with_stride=True) anchor_list = [multi_level_anchors for _ in range(num_imgs)] # for each image, we compute valid flags of multi level anchors valid_flag_list = [] for img_id, img_meta in enumerate(batch_img_metas): multi_level_flags = self.prior_generator.valid_flags( featmap_sizes, img_meta['pad_shape'], device) valid_flag_list.append(multi_level_flags) return anchor_list, valid_flag_list @MODELS.register_module() class RTMDetSepBNHead(RTMDetHead): """RTMDetHead with separated BN layers and shared conv layers. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. 
share_conv (bool): Whether to share conv layers between stages. Defaults to True. use_depthwise (bool): Whether to use depthwise separable convolution in head. Defaults to False. norm_cfg (:obj:`ConfigDict` or dict)): Config dict for normalization layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001). act_cfg (:obj:`ConfigDict` or dict)): Config dict for activation layer. Defaults to dict(type='SiLU'). pred_kernel_size (int): Kernel size of prediction layer. Defaults to 1. """ def __init__(self, num_classes: int, in_channels: int, share_conv: bool = True, use_depthwise: bool = False, norm_cfg: ConfigType = dict( type='BN', momentum=0.03, eps=0.001), act_cfg: ConfigType = dict(type='SiLU'), pred_kernel_size: int = 1, exp_on_reg=False, **kwargs) -> None: self.share_conv = share_conv self.exp_on_reg = exp_on_reg self.use_depthwise = use_depthwise super().__init__( num_classes, in_channels, norm_cfg=norm_cfg, act_cfg=act_cfg, pred_kernel_size=pred_kernel_size, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" conv = DepthwiseSeparableConvModule \ if self.use_depthwise else ConvModule self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() self.rtm_cls = nn.ModuleList() self.rtm_reg = nn.ModuleList() if self.with_objectness: self.rtm_obj = nn.ModuleList() for n in range(len(self.prior_generator.strides)): cls_convs = nn.ModuleList() reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels cls_convs.append( conv( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) reg_convs.append( conv( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) self.cls_convs.append(cls_convs) self.reg_convs.append(reg_convs) self.rtm_cls.append( nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, self.pred_kernel_size, padding=self.pred_kernel_size // 2)) self.rtm_reg.append( nn.Conv2d( self.feat_channels, self.num_base_priors * 4, self.pred_kernel_size, padding=self.pred_kernel_size // 2)) if self.with_objectness: self.rtm_obj.append( nn.Conv2d( self.feat_channels, 1, self.pred_kernel_size, padding=self.pred_kernel_size // 2)) if self.share_conv: for n in range(len(self.prior_generator.strides)): for i in range(self.stacked_convs): self.cls_convs[n][i].conv = self.cls_convs[0][i].conv self.reg_convs[n][i].conv = self.reg_convs[0][i].conv def init_weights(self) -> None: """Initialize weights of the head.""" for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) bias_cls = bias_init_with_prob(0.01) for rtm_cls, rtm_reg in zip(self.rtm_cls, self.rtm_reg): normal_init(rtm_cls, std=0.01, bias=bias_cls) normal_init(rtm_reg, std=0.01) if self.with_objectness: for rtm_obj in self.rtm_obj: normal_init(rtm_obj, std=0.01, bias=bias_cls) def forward(self, feats: Tuple[Tensor, ...]) -> tuple: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (tuple[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. - bbox_preds (tuple[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. 
""" cls_scores = [] bbox_preds = [] for idx, (x, stride) in enumerate( zip(feats, self.prior_generator.strides)): cls_feat = x reg_feat = x for cls_layer in self.cls_convs[idx]: cls_feat = cls_layer(cls_feat) cls_score = self.rtm_cls[idx](cls_feat) for reg_layer in self.reg_convs[idx]: reg_feat = reg_layer(reg_feat) if self.with_objectness: objectness = self.rtm_obj[idx](reg_feat) cls_score = inverse_sigmoid( sigmoid_geometric_mean(cls_score, objectness)) if self.exp_on_reg: reg_dist = self.rtm_reg[idx](reg_feat).exp() * stride[0] else: reg_dist = self.rtm_reg[idx](reg_feat) * stride[0] cls_scores.append(cls_score) bbox_preds.append(reg_dist) return tuple(cls_scores), tuple(bbox_preds)
29,566
41.665224
79
py
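# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this repo), relating to the RTMDet head
# above: its `loss_by_feat` decodes each (l, t, r, b) distance prediction
# into a box around the prior point before target assignment.
# `decode_distance` is an invented name approximating what mmdet's
# `distance2bbox` does in the simple unclamped case.
import torch


def decode_distance(points: torch.Tensor, distances: torch.Tensor) -> torch.Tensor:
    """points: (N, 2) prior centers; distances: (N, 4) as (l, t, r, b)."""
    x1 = points[:, 0] - distances[:, 0]
    y1 = points[:, 1] - distances[:, 1]
    x2 = points[:, 0] + distances[:, 2]
    y2 = points[:, 1] + distances[:, 3]
    return torch.stack([x1, y1, x2, y2], dim=-1)


if __name__ == '__main__':
    priors = torch.tensor([[64., 64.]])
    pred = torch.tensor([[10., 20., 30., 40.]])  # distances already scaled by stride
    print(decode_distance(priors, pred))  # tensor([[54., 44., 94., 104.]])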
ERD
ERD-main/mmdet/models/dense_heads/fsaf_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Optional, Tuple import numpy as np import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig from ..losses.accuracy import accuracy from ..losses.utils import weight_reduce_loss from ..task_modules.prior_generators import anchor_inside_flags from ..utils import images_to_levels, multi_apply, unmap from .retina_head import RetinaHead @MODELS.register_module() class FSAFHead(RetinaHead): """Anchor-free head used in `FSAF <https://arxiv.org/abs/1903.00621>`_. The head contains two subnetworks. The first classifies anchor boxes and the second regresses deltas for the anchors (num_anchors is 1 for anchor- free methods) Args: *args: Same as its base class in :class:`RetinaHead` score_threshold (float, optional): The score_threshold to calculate positive recall. If given, prediction scores lower than this value is counted as incorrect prediction. Defaults to None. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict]): Initialization config dict. **kwargs: Same as its base class in :class:`RetinaHead` Example: >>> import torch >>> self = FSAFHead(11, 7) >>> x = torch.rand(1, 7, 32, 32) >>> cls_score, bbox_pred = self.forward_single(x) >>> # Each anchor predicts a score for each class except background >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors >>> assert cls_per_anchor == self.num_classes >>> assert box_per_anchor == 4 """ def __init__(self, *args, score_threshold: Optional[float] = None, init_cfg: OptMultiConfig = None, **kwargs) -> None: # The positive bias in self.retina_reg conv is to prevent predicted \ # bbox with 0 area if init_cfg is None: init_cfg = dict( type='Normal', layer='Conv2d', std=0.01, override=[ dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01), dict( type='Normal', name='retina_reg', std=0.01, bias=0.25) ]) super().__init__(*args, init_cfg=init_cfg, **kwargs) self.score_threshold = score_threshold def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: """Forward feature map of a single scale level. Args: x (Tensor): Feature map of a single scale level. Returns: tuple[Tensor, Tensor]: - cls_score (Tensor): Box scores for each scale level Has \ shape (N, num_points * num_classes, H, W). - bbox_pred (Tensor): Box energies / deltas for each scale \ level with shape (N, num_points * 4, H, W). """ cls_score, bbox_pred = super().forward_single(x) # relu: TBLR encoder only accepts positive bbox_pred return cls_score, self.relu(bbox_pred) def _get_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression and classification targets for anchors in a single image. Most of the codes are the same with the base class :obj: `AnchorHead`, except that it also collects and returns the matched gt index in the image (from 0 to num_gt-1). If the anchor bbox is not matched to any gt, the corresponding value in pos_gt_inds is -1. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors, 4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors, ). 
gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # Assign gt and sample anchors anchors = flat_anchors[inside_flags.type(torch.bool), :] pred_instances = InstanceData(priors=anchors) assign_result = self.assigner.assign(pred_instances, gt_instances, gt_instances_ignore) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros( (num_valid_anchors, self.cls_out_channels), dtype=torch.float) pos_gt_inds = anchors.new_full((num_valid_anchors, ), -1, dtype=torch.long) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) else: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, both # the predicted boxes and regression targets should be with # absolute coordinate format. pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 # The assigned gt_index for each anchor. (0-based) pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # shadowed_labels is a tensor composed of tuples # (anchor_inds, class_label) that indicate those anchors lying in the # outer region of a gt or overlapped by another gt with a smaller # area. # # Therefore, only the shadowed labels are ignored for loss calculation. 
# the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner` shadowed_labels = assign_result.get_extra_property('shadowed_labels') if shadowed_labels is not None and shadowed_labels.numel(): if len(shadowed_labels.shape) == 2: idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1] assert (labels[idx_] != label_).all(), \ 'One label cannot be both positive and ignored' label_weights[idx_, label_] = 0 else: label_weights[shadowed_labels] = 0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) # fill bg label label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) pos_gt_inds = unmap( pos_gt_inds, num_total_anchors, inside_flags, fill=-1) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result, pos_gt_inds) def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_points * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_points * 4, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ for i in range(len(bbox_preds)): # loop over fpn level # avoid 0 area of the predicted bbox bbox_preds[i] = bbox_preds[i].clamp(min=1e-4) # TODO: It may directly use the base-class loss function. featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels batch_size = len(batch_img_metas) device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, return_sampling_results=True) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor, sampling_results_list, pos_assigned_gt_inds_list) = cls_reg_targets num_gts = np.array(list(map(len, batch_gt_instances))) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_by_feat_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor=avg_factor) # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned # gt index of each anchor bbox in each fpn level. 
cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size for i, assign in enumerate(pos_assigned_gt_inds_list): # loop over fpn levels for j in range(1, batch_size): # loop over batch size # Convert gt indices in each img to those in the batch assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1]) pos_assigned_gt_inds_list[i] = assign.flatten() labels_list[i] = labels_list[i].flatten() num_gts = num_gts.sum() # total number of gt in the batch # The unique label index of each gt in the batch label_sequence = torch.arange(num_gts, device=device) # Collect the average loss of each gt in each level with torch.no_grad(): loss_levels, = multi_apply( self.collect_loss_level_single, losses_cls, losses_bbox, pos_assigned_gt_inds_list, labels_seq=label_sequence) # Shape: (fpn_levels, num_gts). Loss of each gt at each fpn level loss_levels = torch.stack(loss_levels, dim=0) # Locate the best fpn level for loss back-propagation if loss_levels.numel() == 0: # zero gt argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long) else: _, argmin = loss_levels.min(dim=0) # Reweight the loss of each (anchor, label) pair, so that only those # at the best gt level are back-propagated. losses_cls, losses_bbox, pos_inds = multi_apply( self.reweight_loss_single, losses_cls, losses_bbox, pos_assigned_gt_inds_list, labels_list, list(range(len(losses_cls))), min_levels=argmin) num_pos = torch.cat(pos_inds, 0).sum().float() pos_recall = self.calculate_pos_recall(cls_scores, labels_list, pos_inds) if num_pos == 0: # No gt num_total_neg = sum( [results.num_neg for results in sampling_results_list]) avg_factor = num_pos + num_total_neg else: avg_factor = num_pos for i in range(len(losses_cls)): losses_cls[i] /= avg_factor losses_bbox[i] /= avg_factor return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, num_pos=num_pos / batch_size, pos_recall=pos_recall) def calculate_pos_recall(self, cls_scores: List[Tensor], labels_list: List[Tensor], pos_inds: List[Tensor]) -> Tensor: """Calculate positive recall with score threshold. Args: cls_scores (list[Tensor]): Classification scores at all fpn levels. Each tensor is in shape (N, num_classes * num_anchors, H, W) labels_list (list[Tensor]): The label that each anchor is assigned to. Shape (N * H * W * num_anchors, ) pos_inds (list[Tensor]): List of bool tensors indicating whether the anchor is assigned to a positive label. Shape (N * H * W * num_anchors, ) Returns: Tensor: A single float number indicating the positive recall. """ with torch.no_grad(): num_class = self.num_classes scores = [ cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos] for cls, pos in zip(cls_scores, pos_inds) ] labels = [ label.reshape(-1)[pos] for label, pos in zip(labels_list, pos_inds) ] scores = torch.cat(scores, dim=0) labels = torch.cat(labels, dim=0) if self.use_sigmoid_cls: scores = scores.sigmoid() else: scores = scores.softmax(dim=1) return accuracy(scores, labels, thresh=self.score_threshold) def collect_loss_level_single(self, cls_loss: Tensor, reg_loss: Tensor, assigned_gt_inds: Tensor, labels_seq: Tensor) -> Tensor: """Get the average loss in each FPN level w.r.t. each gt label. Args: cls_loss (Tensor): Classification loss of each feature map pixel, shape (num_anchor, num_class) reg_loss (Tensor): Regression loss of each feature map pixel, shape (num_anchor, 4) assigned_gt_inds (Tensor): It indicates which gt the prior is assigned to (0-based, -1: no assignment). shape (num_anchor), labels_seq: The rank of labels. 
shape (num_gt) Returns: Tensor: shape (num_gt), average loss of each gt in this level """ if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4) reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims if len(cls_loss.shape) == 2: cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims loss = cls_loss + reg_loss assert loss.size(0) == assigned_gt_inds.size(0) # Default loss value is 1e6 for a layer where no anchor is positive # to ensure it will not be chosen to back-propagate gradient losses_ = loss.new_full(labels_seq.shape, 1e6) for i, l in enumerate(labels_seq): match = assigned_gt_inds == l if match.any(): losses_[i] = loss[match].mean() return losses_, def reweight_loss_single(self, cls_loss: Tensor, reg_loss: Tensor, assigned_gt_inds: Tensor, labels: Tensor, level: int, min_levels: Tensor) -> tuple: """Reweight loss values at each level. Reassign loss values at each level by masking those where the pre-calculated loss is too large. Then return the reduced losses. Args: cls_loss (Tensor): Element-wise classification loss. Shape: (num_anchors, num_classes) reg_loss (Tensor): Element-wise regression loss. Shape: (num_anchors, 4) assigned_gt_inds (Tensor): The gt indices that each anchor bbox is assigned to. -1 denotes a negative anchor, otherwise it is the gt index (0-based). Shape: (num_anchors, ), labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ). level (int): The current level index in the pyramid (0-4 for RetinaNet) min_levels (Tensor): The best-matching level for each gt. Shape: (num_gts, ), Returns: tuple: - cls_loss: Reduced corrected classification loss. Scalar. - reg_loss: Reduced corrected regression loss. Scalar. - pos_flags (Tensor): Corrected bool tensor indicating the \ final positive anchors. Shape: (num_anchors, ). """ loc_weight = torch.ones_like(reg_loss) cls_weight = torch.ones_like(cls_loss) pos_flags = assigned_gt_inds >= 0 # positive pixel flag pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten() if pos_flags.any(): # pos pixels exist pos_assigned_gt_inds = assigned_gt_inds[pos_flags] zeroing_indices = (min_levels[pos_assigned_gt_inds] != level) neg_indices = pos_indices[zeroing_indices] if neg_indices.numel(): pos_flags[neg_indices] = 0 loc_weight[neg_indices] = 0 # Only the weight corresponding to the label is # zeroed out if not selected zeroing_labels = labels[neg_indices] assert (zeroing_labels >= 0).all() cls_weight[neg_indices, zeroing_labels] = 0 # Weighted loss for both cls and reg loss cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum') reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum') return cls_loss, reg_loss, pos_flags
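# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this repo): FSAF's online feature-level
# selection in miniature. Given a (num_levels, num_gts) matrix of per-gt
# losses (as built from `collect_loss_level_single` above), each ground
# truth keeps only its cheapest pyramid level; the other levels are masked
# out by `reweight_loss_single`. The numbers below are made up.
import torch

loss_levels = torch.tensor([
    [1.2, 0.9, 1e6],  # level 0 (1e6 marks "no positive anchor for this gt")
    [0.7, 1.1, 2.3],  # level 1
    [0.9, 1.4, 1.8],  # level 2
])

_, best_level = loss_levels.min(dim=0)
print(best_level)  # tensor([1, 0, 2]): gt0 -> level 1, gt1 -> level 0, gt2 -> level 2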
20,957
44.660131
79
py
ERD
ERD-main/mmdet/models/dense_heads/atss_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Sequence, Tuple import torch import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList, reduce_mean) from ..task_modules.prior_generators import anchor_inside_flags from ..utils import images_to_levels, multi_apply, unmap from .anchor_head import AnchorHead @MODELS.register_module() class ATSSHead(AnchorHead): """Detection Head of `ATSS <https://arxiv.org/abs/1912.02424>`_. ATSS head structure is similar with FCOS, however ATSS use anchor boxes and assign label by Adaptive Training Sample Selection instead max-iou. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. pred_kernel_size (int): Kernel size of ``nn.Conv2d`` stacked_convs (int): Number of stacking convs of the head. conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for convolution layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization layer. Defaults to ``dict(type='GN', num_groups=32, requires_grad=True)``. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Defaults to False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. loss_centerness (:obj:`ConfigDict` or dict): Config of centerness loss. Defaults to ``dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)``. init_cfg (:obj:`ConfigDict` or dict or list[dict] or list[:obj:`ConfigDict`]): Initialization config dict. 
""" def __init__(self, num_classes: int, in_channels: int, pred_kernel_size: int = 3, stacked_convs: int = 4, conv_cfg: OptConfigType = None, norm_cfg: ConfigType = dict( type='GN', num_groups=32, requires_grad=True), reg_decoded_bbox: bool = True, loss_centerness: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='atss_cls', std=0.01, bias_prob=0.01)), **kwargs) -> None: self.pred_kernel_size = pred_kernel_size self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super().__init__( num_classes=num_classes, in_channels=in_channels, reg_decoded_bbox=reg_decoded_bbox, init_cfg=init_cfg, **kwargs) self.sampling = False self.loss_centerness = MODELS.build(loss_centerness) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) pred_pad_size = self.pred_kernel_size // 2 self.atss_cls = nn.Conv2d( self.feat_channels, self.num_anchors * self.cls_out_channels, self.pred_kernel_size, padding=pred_pad_size) self.atss_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, self.pred_kernel_size, padding=pred_pad_size) self.atss_centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, self.pred_kernel_size, padding=pred_pad_size) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. """ return multi_apply(self.forward_single, x, self.scales) def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level the channels number is num_anchors * num_classes. bbox_pred (Tensor): Box energies / deltas for a single scale level, the channels number is num_anchors * 4. centerness (Tensor): Centerness for a single scale level, the channel number is (N, num_anchors * 1, H, W). 
""" cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.atss_cls(cls_feat) # we just follow atss, not apply exp in bbox_pred bbox_pred = scale(self.atss_reg(reg_feat)).float() centerness = self.atss_centerness(reg_feat) return cls_score, bbox_pred, centerness def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor, bbox_pred: Tensor, centerness: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, avg_factor: float) -> dict: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). avg_factor (float): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: dict[str, Tensor]: A dictionary of loss components. """ anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape( -1, self.cls_out_channels).contiguous() bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) centerness = centerness.permute(0, 2, 3, 1).reshape(-1) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # classification loss loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_centerness = centerness[pos_inds] centerness_targets = self.centerness_target( pos_anchors, pos_bbox_targets) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchors, pos_bbox_pred) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_bbox_targets, weight=centerness_targets, avg_factor=1.0) # centerness loss loss_centerness = self.loss_centerness( pos_centerness, centerness_targets, avg_factor=avg_factor) else: loss_bbox = bbox_pred.sum() * 0 loss_centerness = centerness.sum() * 0 centerness_targets = bbox_targets.new_tensor(0.) return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum() def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], centernesses: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. 
Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) centernesses (list[Tensor]): Centerness for each scale level with shape (N, num_anchors * 1, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() losses_cls, losses_bbox, loss_centerness, \ bbox_avg_factor = multi_apply( self.loss_by_feat_single, anchor_list, cls_scores, bbox_preds, centernesses, labels_list, label_weights_list, bbox_targets_list, avg_factor=avg_factor) bbox_avg_factor = sum(bbox_avg_factor) bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_centerness=loss_centerness) def centerness_target(self, anchors: Tensor, gts: Tensor) -> Tensor: """Calculate the centerness between anchors and gts. Only calculate pos centerness targets, otherwise there may be nan. Args: anchors (Tensor): Anchors with shape (N, 4), "xyxy" format. gts (Tensor): Ground truth bboxes with shape (N, 4), "xyxy" format. Returns: Tensor: Centerness between anchors and gts. """ anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 l_ = anchors_cx - gts[:, 0] t_ = anchors_cy - gts[:, 1] r_ = gts[:, 2] - anchors_cx b_ = gts[:, 3] - anchors_cy left_right = torch.stack([l_, r_], dim=1) top_bottom = torch.stack([t_, b_], dim=1) centerness = torch.sqrt( (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])) assert not torch.isnan(centerness).any() return centerness def get_targets(self, anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Get targets for ATSS head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. 
""" num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._get_targets_single, anchor_list, valid_flag_list, num_level_anchors_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) # Get `avg_factor` of all images, which calculate in `SamplingResult`. # When using sampling method, avg_factor is usually the sum of # positive and negative priors. When using `PseudoSampler`, # `avg_factor` is usually equal to the number of positive priors. avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) def _get_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, num_level_anchors: List[int], gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). num_level_anchors (List[int]): Number of anchors of each scale level. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. labels (Tensor): Labels of all anchors in the image with shape (N,). label_weights (Tensor): Label weights of all anchor in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4) pos_inds (Tensor): Indices of positive anchor with shape (num_pos,). neg_inds (Tensor): Indices of negative anchor with shape (num_neg,). sampling_result (:obj:`SamplingResult`): Sampling results. 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) pred_instances = InstanceData(priors=anchors) assign_result = self.assigner.assign(pred_instances, num_level_anchors_inside, gt_instances, gt_instances_ignore) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if self.reg_decoded_bbox: pos_bbox_targets = sampling_result.pos_gt_bboxes else: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_priors, sampling_result.pos_gt_bboxes) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): """Get the number of valid anchors in every level.""" split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside
23,314
43.409524
79
py
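The centerness_target method of ATSSHead above implements the FCOS-style centerness measure sqrt(min(l, r) / max(l, r) * min(t, b) / max(t, b)). A standalone re-computation in plain PyTorch for a single anchor/gt pair (the boxes below are made up for illustration):

import torch

# Anchor and ground-truth boxes in (x1, y1, x2, y2) format; the anchor centre
# is (50, 50) and the gt centre is (55, 60), so the target is well below 1.
anchor = torch.tensor([40., 40., 60., 60.])
gt = torch.tensor([30., 30., 80., 90.])

cx, cy = (anchor[0] + anchor[2]) / 2, (anchor[1] + anchor[3]) / 2
l, t = cx - gt[0], cy - gt[1]                 # distances to left/top gt edges
r, b = gt[2] - cx, gt[3] - cy                 # distances to right/bottom gt edges

centerness = torch.sqrt(
    (torch.min(l, r) / torch.max(l, r)) * (torch.min(t, b) / torch.max(t, b)))
print(centerness)  # ~0.577; it reaches 1.0 only when the anchor centre equals the gt centre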
ERD
ERD-main/mmdet/models/dense_heads/detr_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Linear from mmcv.cnn.bricks.transformer import FFN from mmengine.model import BaseModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh from mmdet.utils import (ConfigType, InstanceList, OptInstanceList, OptMultiConfig, reduce_mean) from ..utils import multi_apply @MODELS.register_module() class DETRHead(BaseModule): r"""Head of DETR. DETR:End-to-End Object Detection with Transformers. More details can be found in the `paper <https://arxiv.org/pdf/2005.12872>`_ . Args: num_classes (int): Number of categories excluding the background. embed_dims (int): The dims of Transformer embedding. num_reg_fcs (int): Number of fully-connected layers used in `FFN`, which is then used for the regression head. Defaults to 2. sync_cls_avg_factor (bool): Whether to sync the `avg_factor` of all ranks. Default to `False`. loss_cls (:obj:`ConfigDict` or dict): Config of the classification loss. Defaults to `CrossEntropyLoss`. loss_bbox (:obj:`ConfigDict` or dict): Config of the regression bbox loss. Defaults to `L1Loss`. loss_iou (:obj:`ConfigDict` or dict): Config of the regression iou loss. Defaults to `GIoULoss`. train_cfg (:obj:`ConfigDict` or dict): Training config of transformer head. test_cfg (:obj:`ConfigDict` or dict): Testing config of transformer head. init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. Defaults to None. """ _version = 2 def __init__( self, num_classes: int, embed_dims: int = 256, num_reg_fcs: int = 2, sync_cls_avg_factor: bool = False, loss_cls: ConfigType = dict( type='CrossEntropyLoss', bg_cls_weight=0.1, use_sigmoid=False, loss_weight=1.0, class_weight=1.0), loss_bbox: ConfigType = dict(type='L1Loss', loss_weight=5.0), loss_iou: ConfigType = dict(type='GIoULoss', loss_weight=2.0), train_cfg: ConfigType = dict( assigner=dict( type='HungarianAssigner', match_costs=[ dict(type='ClassificationCost', weight=1.), dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), dict(type='IoUCost', iou_mode='giou', weight=2.0) ])), test_cfg: ConfigType = dict(max_per_img=100), init_cfg: OptMultiConfig = None) -> None: super().__init__(init_cfg=init_cfg) self.bg_cls_weight = 0 self.sync_cls_avg_factor = sync_cls_avg_factor class_weight = loss_cls.get('class_weight', None) if class_weight is not None and (self.__class__ is DETRHead): assert isinstance(class_weight, float), 'Expected ' \ 'class_weight to have type float. Found ' \ f'{type(class_weight)}.' # NOTE following the official DETR repo, bg_cls_weight means # relative classification weight of the no-object class. bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight) assert isinstance(bg_cls_weight, float), 'Expected ' \ 'bg_cls_weight to have type float. Found ' \ f'{type(bg_cls_weight)}.' class_weight = torch.ones(num_classes + 1) * class_weight # set background class as the last indice class_weight[num_classes] = bg_cls_weight loss_cls.update({'class_weight': class_weight}) if 'bg_cls_weight' in loss_cls: loss_cls.pop('bg_cls_weight') self.bg_cls_weight = bg_cls_weight if train_cfg: assert 'assigner' in train_cfg, 'assigner should be provided ' \ 'when train_cfg is set.' 
assigner = train_cfg['assigner'] self.assigner = TASK_UTILS.build(assigner) if train_cfg.get('sampler', None) is not None: raise RuntimeError('DETR do not build sampler.') self.num_classes = num_classes self.embed_dims = embed_dims self.num_reg_fcs = num_reg_fcs self.train_cfg = train_cfg self.test_cfg = test_cfg self.loss_cls = MODELS.build(loss_cls) self.loss_bbox = MODELS.build(loss_bbox) self.loss_iou = MODELS.build(loss_iou) if self.loss_cls.use_sigmoid: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self._init_layers() def _init_layers(self) -> None: """Initialize layers of the transformer head.""" # cls branch self.fc_cls = Linear(self.embed_dims, self.cls_out_channels) # reg branch self.activate = nn.ReLU() self.reg_ffn = FFN( self.embed_dims, self.embed_dims, self.num_reg_fcs, dict(type='ReLU', inplace=True), dropout=0.0, add_residual=False) # NOTE the activations of reg_branch here is the same as # those in transformer, but they are actually different # in DAB-DETR (prelu in transformer and relu in reg_branch) self.fc_reg = Linear(self.embed_dims, 4) def forward(self, hidden_states: Tensor) -> Tuple[Tensor]: """"Forward function. Args: hidden_states (Tensor): Features from transformer decoder. If `return_intermediate_dec` in detr.py is True output has shape (num_decoder_layers, bs, num_queries, dim), else has shape (1, bs, num_queries, dim) which only contains the last layer outputs. Returns: tuple[Tensor]: results of head containing the following tensor. - layers_cls_scores (Tensor): Outputs from the classification head, shape (num_decoder_layers, bs, num_queries, cls_out_channels). Note cls_out_channels should include background. - layers_bbox_preds (Tensor): Sigmoid outputs from the regression head with normalized coordinate format (cx, cy, w, h), has shape (num_decoder_layers, bs, num_queries, 4). """ layers_cls_scores = self.fc_cls(hidden_states) layers_bbox_preds = self.fc_reg( self.activate(self.reg_ffn(hidden_states))).sigmoid() return layers_cls_scores, layers_bbox_preds def loss(self, hidden_states: Tensor, batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection head on the features of the upstream network. Args: hidden_states (Tensor): Feature from the transformer decoder, has shape (num_decoder_layers, bs, num_queries, cls_out_channels) or (num_decoder_layers, num_queries, bs, cls_out_channels). batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: dict: A dictionary of loss components. """ batch_gt_instances = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) outs = self(hidden_states) loss_inputs = outs + (batch_gt_instances, batch_img_metas) losses = self.loss_by_feat(*loss_inputs) return losses def loss_by_feat( self, all_layers_cls_scores: Tensor, all_layers_bbox_preds: Tensor, batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """"Loss function. Only outputs from the last feature level are used for computing losses by default. Args: all_layers_cls_scores (Tensor): Classification outputs of each decoder layers. Each is a 4D-tensor, has shape (num_decoder_layers, bs, num_queries, cls_out_channels). 
all_layers_bbox_preds (Tensor): Sigmoid regression outputs of each decoder layers. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape (num_decoder_layers, bs, num_queries, 4). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert batch_gt_instances_ignore is None, \ f'{self.__class__.__name__} only supports ' \ 'for batch_gt_instances_ignore setting to None.' losses_cls, losses_bbox, losses_iou = multi_apply( self.loss_by_feat_single, all_layers_cls_scores, all_layers_bbox_preds, batch_gt_instances=batch_gt_instances, batch_img_metas=batch_img_metas) loss_dict = dict() # loss from the last decoder layer loss_dict['loss_cls'] = losses_cls[-1] loss_dict['loss_bbox'] = losses_bbox[-1] loss_dict['loss_iou'] = losses_iou[-1] # loss from other decoder layers num_dec_layer = 0 for loss_cls_i, loss_bbox_i, loss_iou_i in \ zip(losses_cls[:-1], losses_bbox[:-1], losses_iou[:-1]): loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i num_dec_layer += 1 return loss_dict def loss_by_feat_single(self, cls_scores: Tensor, bbox_preds: Tensor, batch_gt_instances: InstanceList, batch_img_metas: List[dict]) -> Tuple[Tensor]: """Loss function for outputs from a single decoder layer of a single feature level. Args: cls_scores (Tensor): Box score logits from a single decoder layer for all images, has shape (bs, num_queries, cls_out_channels). bbox_preds (Tensor): Sigmoid outputs from a single decoder layer for all images, with normalized coordinate (cx, cy, w, h) and shape (bs, num_queries, 4). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. Returns: Tuple[Tensor]: A tuple including `loss_cls`, `loss_box` and `loss_iou`. 
""" num_imgs = cls_scores.size(0) cls_scores_list = [cls_scores[i] for i in range(num_imgs)] bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, batch_gt_instances, batch_img_metas) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets labels = torch.cat(labels_list, 0) label_weights = torch.cat(label_weights_list, 0) bbox_targets = torch.cat(bbox_targets_list, 0) bbox_weights = torch.cat(bbox_weights_list, 0) # classification loss cls_scores = cls_scores.reshape(-1, self.cls_out_channels) # construct weighted avg_factor to match with the official DETR repo cls_avg_factor = num_total_pos * 1.0 + \ num_total_neg * self.bg_cls_weight if self.sync_cls_avg_factor: cls_avg_factor = reduce_mean( cls_scores.new_tensor([cls_avg_factor])) cls_avg_factor = max(cls_avg_factor, 1) loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=cls_avg_factor) # Compute the average number of gt boxes across all gpus, for # normalization purposes num_total_pos = loss_cls.new_tensor([num_total_pos]) num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() # construct factors used for rescale bboxes factors = [] for img_meta, bbox_pred in zip(batch_img_metas, bbox_preds): img_h, img_w, = img_meta['img_shape'] factor = bbox_pred.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0).repeat( bbox_pred.size(0), 1) factors.append(factor) factors = torch.cat(factors, 0) # DETR regress the relative position of boxes (cxcywh) in the image, # thus the learning target is normalized by the image size. So here # we need to re-scale them for calculating IoU loss bbox_preds = bbox_preds.reshape(-1, 4) bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors # regression IoU loss, defaultly GIoU loss loss_iou = self.loss_iou( bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) # regression L1 loss loss_bbox = self.loss_bbox( bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) return loss_cls, loss_bbox, loss_iou def get_targets(self, cls_scores_list: List[Tensor], bbox_preds_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict]) -> tuple: """Compute regression and classification targets for a batch image. Outputs from a single decoder layer of a single feature level are used. Args: cls_scores_list (list[Tensor]): Box score logits from a single decoder layer for each image, has shape [num_queries, cls_out_channels]. bbox_preds_list (list[Tensor]): Sigmoid outputs from a single decoder layer for each image, with normalized coordinate (cx, cy, w, h) and shape [num_queries, 4]. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. Returns: tuple: a tuple containing the following targets. - labels_list (list[Tensor]): Labels for all images. - label_weights_list (list[Tensor]): Label weights for all images. - bbox_targets_list (list[Tensor]): BBox targets for all images. - bbox_weights_list (list[Tensor]): BBox weights for all images. - num_total_pos (int): Number of positive samples in all images. - num_total_neg (int): Number of negative samples in all images. 
""" (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply(self._get_targets_single, cls_scores_list, bbox_preds_list, batch_gt_instances, batch_img_metas) num_total_pos = sum((inds.numel() for inds in pos_inds_list)) num_total_neg = sum((inds.numel() for inds in neg_inds_list)) return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_targets_single(self, cls_score: Tensor, bbox_pred: Tensor, gt_instances: InstanceData, img_meta: dict) -> tuple: """Compute regression and classification targets for one image. Outputs from a single decoder layer of a single feature level are used. Args: cls_score (Tensor): Box score logits from a single decoder layer for one image. Shape [num_queries, cls_out_channels]. bbox_pred (Tensor): Sigmoid outputs from a single decoder layer for one image, with normalized coordinate (cx, cy, w, h) and shape [num_queries, 4]. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for one image. Returns: tuple[Tensor]: a tuple containing the following for one image. - labels (Tensor): Labels of each image. - label_weights (Tensor]): Label weights of each image. - bbox_targets (Tensor): BBox targets of each image. - bbox_weights (Tensor): BBox weights of each image. - pos_inds (Tensor): Sampled positive indices for each image. - neg_inds (Tensor): Sampled negative indices for each image. """ img_h, img_w = img_meta['img_shape'] factor = bbox_pred.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) num_bboxes = bbox_pred.size(0) # convert bbox_pred from xywh, normalized to xyxy, unnormalized bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred) bbox_pred = bbox_pred * factor pred_instances = InstanceData(scores=cls_score, bboxes=bbox_pred) # assigner and sampler assign_result = self.assigner.assign( pred_instances=pred_instances, gt_instances=gt_instances, img_meta=img_meta) gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels pos_inds = torch.nonzero( assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() neg_inds = torch.nonzero( assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds.long(), :] # label targets labels = gt_bboxes.new_full((num_bboxes, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[pos_assigned_gt_inds] label_weights = gt_bboxes.new_ones(num_bboxes) # bbox targets bbox_targets = torch.zeros_like(bbox_pred) bbox_weights = torch.zeros_like(bbox_pred) bbox_weights[pos_inds] = 1.0 # DETR regress the relative position of boxes (cxcywh) in the image. # Thus the learning target should be normalized by the image size, also # the box format should be converted from defaultly x1y1x2y2 to cxcywh. pos_gt_bboxes_normalized = pos_gt_bboxes / factor pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized) bbox_targets[pos_inds] = pos_gt_bboxes_targets return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def loss_and_predict( self, hidden_states: Tuple[Tensor], batch_data_samples: SampleList) -> Tuple[dict, InstanceList]: """Perform forward propagation of the head, then calculate loss and predictions from the features and data samples. Over-write because img_metas are needed as inputs for bbox_head. 
Args: hidden_states (tuple[Tensor]): Feature from the transformer decoder, has shape (num_decoder_layers, bs, num_queries, dim). batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. Returns: tuple: the return value is a tuple contains: - losses: (dict[str, Tensor]): A dictionary of loss components. - predictions (list[:obj:`InstanceData`]): Detection results of each image after the post process. """ batch_gt_instances = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) outs = self(hidden_states) loss_inputs = outs + (batch_gt_instances, batch_img_metas) losses = self.loss_by_feat(*loss_inputs) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas) return losses, predictions def predict(self, hidden_states: Tuple[Tensor], batch_data_samples: SampleList, rescale: bool = True) -> InstanceList: """Perform forward propagation of the detection head and predict detection results on the features of the upstream network. Over-write because img_metas are needed as inputs for bbox_head. Args: hidden_states (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to True. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. """ batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] last_layer_hidden_state = hidden_states[-1].unsqueeze(0) outs = self(last_layer_hidden_state) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return predictions def predict_by_feat(self, layer_cls_scores: Tensor, layer_bbox_preds: Tensor, batch_img_metas: List[dict], rescale: bool = True) -> InstanceList: """Transform network outputs for a batch into bbox predictions. Args: layer_cls_scores (Tensor): Classification outputs of the last or all decoder layer. Each is a 4D-tensor, has shape (num_decoder_layers, bs, num_queries, cls_out_channels). layer_bbox_preds (Tensor): Sigmoid regression outputs of the last or all decoder layer. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape (num_decoder_layers, bs, num_queries, 4). batch_img_metas (list[dict]): Meta information of each image. rescale (bool, optional): If `True`, return boxes in original image space. Defaults to `True`. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ # NOTE only using outputs from the last feature level, # and only the outputs from the last decoder layer is used. 
cls_scores = layer_cls_scores[-1] bbox_preds = layer_bbox_preds[-1] result_list = [] for img_id in range(len(batch_img_metas)): cls_score = cls_scores[img_id] bbox_pred = bbox_preds[img_id] img_meta = batch_img_metas[img_id] results = self._predict_by_feat_single(cls_score, bbox_pred, img_meta, rescale) result_list.append(results) return result_list def _predict_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, img_meta: dict, rescale: bool = True) -> InstanceData: """Transform outputs from the last decoder layer into bbox predictions for each image. Args: cls_score (Tensor): Box score logits from the last decoder layer for each image. Shape [num_queries, cls_out_channels]. bbox_pred (Tensor): Sigmoid outputs from the last decoder layer for each image, with coordinate format (cx, cy, w, h) and shape [num_queries, 4]. img_meta (dict): Image meta info. rescale (bool): If True, return boxes in original image space. Default True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(cls_score) == len(bbox_pred) # num_queries max_per_img = self.test_cfg.get('max_per_img', len(cls_score)) img_shape = img_meta['img_shape'] # exclude background if self.loss_cls.use_sigmoid: cls_score = cls_score.sigmoid() scores, indexes = cls_score.view(-1).topk(max_per_img) det_labels = indexes % self.num_classes bbox_index = indexes // self.num_classes bbox_pred = bbox_pred[bbox_index] else: scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) scores, bbox_index = scores.topk(max_per_img) bbox_pred = bbox_pred[bbox_index] det_labels = det_labels[bbox_index] det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) if rescale: assert img_meta.get('scale_factor') is not None det_bboxes /= det_bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) results = InstanceData() results.bboxes = det_bboxes results.scores = scores results.labels = det_labels return results
28,137
44.752846
79
py
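Both the loss path (loss_by_feat_single) and the prediction path of the DETR head above turn normalized (cx, cy, w, h) outputs into absolute (x1, y1, x2, y2) boxes by converting the format and multiplying by the image size. A self-contained sketch of that step in plain PyTorch (the image size is an arbitrary example, and the helper mirrors, rather than calls, bbox_cxcywh_to_xyxy):

import torch

def cxcywh_to_xyxy_abs(boxes: torch.Tensor, img_h: int, img_w: int) -> torch.Tensor:
    # (cx, cy, w, h) in [0, 1] -> absolute (x1, y1, x2, y2) in pixels.
    cx, cy, w, h = boxes.unbind(-1)
    xyxy = torch.stack([cx - 0.5 * w, cy - 0.5 * h,
                        cx + 0.5 * w, cy + 0.5 * h], dim=-1)
    factor = boxes.new_tensor([img_w, img_h, img_w, img_h])
    return xyxy * factor

pred = torch.tensor([[0.5, 0.5, 0.2, 0.4]])      # one normalized query output
print(cxcywh_to_xyxy_abs(pred, img_h=600, img_w=800))
# tensor([[320., 180., 480., 420.]])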
ERD
ERD-main/mmdet/models/dense_heads/rpn_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from typing import List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.ops import batched_nms from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures.bbox import (cat_boxes, empty_box_as, get_box_tensor, get_box_wh, scale_boxes) from mmdet.utils import InstanceList, MultiConfig, OptInstanceList from .anchor_head import AnchorHead @MODELS.register_module() class RPNHead(AnchorHead): """Implementation of RPN head. Args: in_channels (int): Number of channels in the input feature map. num_classes (int): Number of categories excluding the background category. Defaults to 1. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \ list[dict]): Initialization config dict. num_convs (int): Number of convolution layers in the head. Defaults to 1. """ # noqa: W605 def __init__(self, in_channels: int, num_classes: int = 1, init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01), num_convs: int = 1, **kwargs) -> None: self.num_convs = num_convs assert num_classes == 1 super().__init__( num_classes=num_classes, in_channels=in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" if self.num_convs > 1: rpn_convs = [] for i in range(self.num_convs): if i == 0: in_channels = self.in_channels else: in_channels = self.feat_channels # use ``inplace=False`` to avoid error: one of the variables # needed for gradient computation has been modified by an # inplace operation. rpn_convs.append( ConvModule( in_channels, self.feat_channels, 3, padding=1, inplace=False)) self.rpn_conv = nn.Sequential(*rpn_convs) else: self.rpn_conv = nn.Conv2d( self.in_channels, self.feat_channels, 3, padding=1) self.rpn_cls = nn.Conv2d(self.feat_channels, self.num_base_priors * self.cls_out_channels, 1) reg_dim = self.bbox_coder.encode_size self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_base_priors * reg_dim, 1) def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level \ the channels number is num_base_priors * num_classes. bbox_pred (Tensor): Box energies / deltas for a single scale \ level, the channels number is num_base_priors * 4. """ x = self.rpn_conv(x) x = F.relu(x) rpn_cls_score = self.rpn_cls(x) rpn_bbox_pred = self.rpn_reg(x) return rpn_cls_score, rpn_bbox_pred def loss_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) \ -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). batch_gt_instances (list[obj:InstanceData]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[obj:InstanceData], Optional): Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute data that is ignored during training and testing. Returns: dict[str, Tensor]: A dictionary of loss components. """ losses = super().loss_by_feat( cls_scores, bbox_preds, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) return dict( loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox']) def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Be compatible with BaseDenseHead. Not used in RPNHead. mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bbox_preds = [] mlvl_valid_priors = [] mlvl_scores = [] level_ids = [] for level_idx, (cls_score, bbox_pred, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] reg_dim = self.bbox_coder.encode_size bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, reg_dim) cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: # remind that we set FG labels to [0] since mmdet v2.0 # BG cat_id: 1 scores = cls_score.softmax(-1)[:, :-1] scores = torch.squeeze(scores) if 0 < nms_pre < scores.shape[0]: # sort is faster than topk # _, topk_inds = scores.topk(cfg.nms_pre) ranked_scores, rank_inds = scores.sort(descending=True) topk_inds = rank_inds[:nms_pre] scores = ranked_scores[:nms_pre] bbox_pred = bbox_pred[topk_inds, :] priors = priors[topk_inds] mlvl_bbox_preds.append(bbox_pred) mlvl_valid_priors.append(priors) mlvl_scores.append(scores) # use level id to implement the separate level nms level_ids.append( scores.new_full((scores.size(0), ), level_idx, dtype=torch.long)) bbox_pred = torch.cat(mlvl_bbox_preds) priors = cat_boxes(mlvl_valid_priors) bboxes = self.bbox_coder.decode(priors, bbox_pred, max_shape=img_shape) results = InstanceData() results.bboxes = bboxes results.scores = torch.cat(mlvl_scores) results.level_ids = torch.cat(level_ids) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, img_meta=img_meta) def _bbox_post_process(self, results: InstanceData, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True, img_meta: Optional[dict] = None) -> InstanceData: """bbox post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Args: results (:obj:`InstaceData`): Detection instance results, each item has shape (num_bboxes, ). cfg (ConfigDict): Test / postprocessing configuration. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Default to True. img_meta (dict, optional): Image meta info. Defaults to None. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" assert with_nms, '`with_nms` must be True in RPNHead' if rescale: assert img_meta.get('scale_factor') is not None scale_factor = [1 / s for s in img_meta['scale_factor']] results.bboxes = scale_boxes(results.bboxes, scale_factor) # filter small size bboxes if cfg.get('min_bbox_size', -1) >= 0: w, h = get_box_wh(results.bboxes) valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) if not valid_mask.all(): results = results[valid_mask] if results.bboxes.numel() > 0: bboxes = get_box_tensor(results.bboxes) det_bboxes, keep_idxs = batched_nms(bboxes, results.scores, results.level_ids, cfg.nms) results = results[keep_idxs] # some nms would reweight the score, such as softnms results.scores = det_bboxes[:, -1] results = results[:cfg.max_per_img] # TODO: This would unreasonably show the 0th class label # in visualization results.labels = results.scores.new_zeros( len(results), dtype=torch.long) del results.level_ids else: # To avoid some potential error results_ = InstanceData() results_.bboxes = empty_box_as(results.bboxes) results_.scores = results.scores.new_zeros(0) results_.labels = results.scores.new_zeros(0) results = results_ return results
12,882
41.518152
79
py
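_predict_by_feat_single in the RPN head above concatenates proposals from all levels and attaches a level id to each one, so that the subsequent batched NMS only suppresses boxes within the same level. A small sketch of that trick using torchvision's batched_nms as a stand-in for mmcv.ops.batched_nms (the boxes and scores are invented):

import torch
from torchvision.ops import batched_nms  # stand-in for mmcv.ops.batched_nms

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],    # heavily overlaps box 0, same level
                      [0., 0., 10., 10.]])   # identical to box 0, but another level
scores = torch.tensor([0.9, 0.8, 0.7])
level_ids = torch.tensor([0, 0, 1])

# Boxes with different ids are internally offset so they can never suppress
# each other: box 1 is removed by box 0, box 2 survives despite full overlap.
keep = batched_nms(boxes, scores, level_ids, iou_threshold=0.5)
print(keep)  # tensor([0, 2])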
ERD
ERD-main/mmdet/models/dense_heads/anchor_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures.bbox import BaseBoxes, cat_boxes, get_box_tensor from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, OptMultiConfig) from ..task_modules.prior_generators import (AnchorGenerator, anchor_inside_flags) from ..task_modules.samplers import PseudoSampler from ..utils import images_to_levels, multi_apply, unmap from .base_dense_head import BaseDenseHead @MODELS.register_module() class AnchorHead(BaseDenseHead): """Anchor-based head (RPN, RetinaNet, SSD, etc.). Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. anchor_generator (dict): Config dict for anchor generator bbox_coder (dict): Config of bounding box coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. init_cfg (dict or list[dict], optional): Initialization config dict. """ # noqa: W605 def __init__( self, num_classes: int, in_channels: int, feat_channels: int = 256, anchor_generator: ConfigType = dict( type='AnchorGenerator', scales=[8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder: ConfigType = dict( type='DeltaXYWHBBoxCoder', clip_border=True, target_means=(.0, .0, .0, .0), target_stds=(1.0, 1.0, 1.0, 1.0)), reg_decoded_bbox: bool = False, loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox: ConfigType = dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: OptMultiConfig = dict( type='Normal', layer='Conv2d', std=0.01) ) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 if self.cls_out_channels <= 0: raise ValueError(f'num_classes={num_classes} is too small') self.reg_decoded_bbox = reg_decoded_bbox self.bbox_coder = TASK_UTILS.build(bbox_coder) self.loss_cls = MODELS.build(loss_cls) self.loss_bbox = MODELS.build(loss_bbox) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) if train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler(context=self) self.fp16_enabled = False self.prior_generator = TASK_UTILS.build(anchor_generator) # Usually the numbers of anchors for each level are the same # except SSD detectors. 
So it is an int in the most dense # heads but a list of int in SSDHead self.num_base_priors = self.prior_generator.num_base_priors[0] self._init_layers() @property def num_anchors(self) -> int: warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' 'for consistency or also use ' '`num_base_priors` instead') return self.prior_generator.num_base_priors[0] @property def anchor_generator(self) -> AnchorGenerator: warnings.warn('DeprecationWarning: anchor_generator is deprecated, ' 'please use "prior_generator" instead') return self.prior_generator def _init_layers(self) -> None: """Initialize layers of the head.""" self.conv_cls = nn.Conv2d(self.in_channels, self.num_base_priors * self.cls_out_channels, 1) reg_dim = self.bbox_coder.encode_size self.conv_reg = nn.Conv2d(self.in_channels, self.num_base_priors * reg_dim, 1) def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level \ the channels number is num_base_priors * num_classes. bbox_pred (Tensor): Box energies / deltas for a single scale \ level, the channels number is num_base_priors * 4. """ cls_score = self.conv_cls(x) bbox_pred = self.conv_reg(x) return cls_score, bbox_pred def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and bbox prediction. - cls_scores (list[Tensor]): Classification scores for all \ scale levels, each is a 4D-tensor, the channels number \ is num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all \ scale levels, each is a 4D-tensor, the channels number \ is num_base_priors * 4. """ return multi_apply(self.forward_single, x) def get_anchors(self, featmap_sizes: List[tuple], batch_img_metas: List[dict], device: Union[torch.device, str] = 'cuda') \ -> Tuple[List[List[Tensor]], List[List[Tensor]]]: """Get anchors according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. batch_img_metas (list[dict]): Image meta info. device (torch.device | str): Device for returned tensors. Defaults to cuda. Returns: tuple: - anchor_list (list[list[Tensor]]): Anchors of each image. - valid_flag_list (list[list[Tensor]]): Valid flags of each image. """ num_imgs = len(batch_img_metas) # since feature map sizes of all images are the same, we only compute # anchors for one time multi_level_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) anchor_list = [multi_level_anchors for _ in range(num_imgs)] # for each image, we compute valid flags of multi level anchors valid_flag_list = [] for img_id, img_meta in enumerate(batch_img_metas): multi_level_flags = self.prior_generator.valid_flags( featmap_sizes, img_meta['pad_shape'], device) valid_flag_list.append(multi_level_flags) return anchor_list, valid_flag_list def _get_targets_single(self, flat_anchors: Union[Tensor, BaseBoxes], valid_flags: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression and classification targets for anchors in a single image. 
Args: flat_anchors (Tensor or :obj:`BaseBoxes`): Multi-level anchors of the image, which are concatenated into a single tensor or box type of shape (num_anchors, 4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors, ). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: - labels (Tensor): Labels of each level. - label_weights (Tensor): Label weights of each level. - bbox_targets (Tensor): BBox targets of each level. - bbox_weights (Tensor): BBox weights of each level. - pos_inds (Tensor): positive samples indexes. - neg_inds (Tensor): negative samples indexes. - sampling_result (:obj:`SamplingResult`): Sampling results. """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors anchors = flat_anchors[inside_flags] pred_instances = InstanceData(priors=anchors) assign_result = self.assigner.assign(pred_instances, gt_instances, gt_instances_ignore) # No sampling is required except for RPN and # Guided Anchoring algorithms sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_anchors = anchors.shape[0] target_dim = gt_instances.bboxes.size(-1) if self.reg_decoded_bbox \ else self.bbox_coder.encode_size bbox_targets = anchors.new_zeros(num_valid_anchors, target_dim) bbox_weights = anchors.new_zeros(num_valid_anchors, target_dim) # TODO: Considering saving memory, is it necessary to be long? labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds # `bbox_coder.encode` accepts tensor or box type inputs and generates # tensor targets. If regressing decoded boxes, the code will convert # box type `pos_bbox_targets` to tensor. 
if len(pos_inds) > 0: if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_priors, sampling_result.pos_gt_bboxes) else: pos_bbox_targets = sampling_result.pos_gt_bboxes pos_bbox_targets = get_box_tensor(pos_bbox_targets) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) # fill bg label label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def get_targets(self, anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True, return_sampling_results: bool = False) -> tuple: """Compute regression and classification targets for anchors in multiple images. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. return_sampling_results (bool): Whether to return the sampling results. Defaults to False. Returns: tuple: Usually returns a tuple containing learning targets. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. - bbox_targets_list (list[Tensor]): BBox targets of each level. - bbox_weights_list (list[Tensor]): BBox weights of each level. - avg_factor (int): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. additional_returns: This function enables user-defined returns from `self._get_targets_single`. These returns are currently refined to properties at each feature map (i.e. having HxW dimension). 
The results will be concatenated after the end """ num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors to a single tensor concat_anchor_list = [] concat_valid_flag_list = [] for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) concat_anchor_list.append(cat_boxes(anchor_list[i])) concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) # compute targets for each image results = multi_apply( self._get_targets_single, concat_anchor_list, concat_valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = results[:7] rest_results = list(results[7:]) # user-added return values # Get `avg_factor` of all images, which calculate in `SamplingResult`. # When using sampling method, avg_factor is usually the sum of # positive and negative priors. When using `PseudoSampler`, # `avg_factor` is usually equal to the number of positive priors. avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # update `_raw_positive_infos`, which will be used when calling # `get_positive_infos`. self._raw_positive_infos.update(sampling_results=sampling_results_list) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) res = (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) if return_sampling_results: res = res + (sampling_results_list, ) for i, r in enumerate(rest_results): # user-added return values rest_results[i] = images_to_levels(r, num_level_anchors) return res + tuple(rest_results) def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, anchors: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, avg_factor: int) -> tuple: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). bbox_weights (Tensor): BBox regression loss weights of each anchor with shape (N, num_total_anchors, 4). avg_factor (int): Average factor that is used to average the loss. Returns: tuple: loss components. 
""" # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor) # regression loss target_dim = bbox_targets.size(-1) bbox_targets = bbox_targets.reshape(-1, target_dim) bbox_weights = bbox_weights.reshape(-1, target_dim) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, self.bbox_coder.encode_size) if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. anchors = anchors.reshape(-1, anchors.size(-1)) bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) bbox_pred = get_box_tensor(bbox_pred) loss_bbox = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor) return loss_cls, loss_bbox def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(cat_boxes(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_by_feat_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor=avg_factor) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
24,816
45.736347
79
py
ERD
ERD-main/mmdet/models/dense_heads/yolox_head.py
# Copyright (c) OpenMMLab. All rights reserved. import math from typing import List, Optional, Sequence, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.ops.nms import batched_nms from mmengine.config import ConfigDict from mmengine.model import bias_init_with_prob from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures.bbox import bbox_xyxy_to_cxcywh from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList, OptMultiConfig, reduce_mean) from ..task_modules.prior_generators import MlvlPointGenerator from ..task_modules.samplers import PseudoSampler from ..utils import multi_apply from .base_dense_head import BaseDenseHead @MODELS.register_module() class YOLOXHead(BaseDenseHead): """YOLOXHead head used in `YOLOX <https://arxiv.org/abs/2107.08430>`_. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels in stacking convs. Defaults to 256 stacked_convs (int): Number of stacking convs of the head. Defaults to (8, 16, 32). strides (Sequence[int]): Downsample factor of each feature map. Defaults to None. use_depthwise (bool): Whether to depthwise separable convolution in blocks. Defaults to False. dcn_on_last_conv (bool): If true, use dcn in the last layer of towers. Defaults to False. conv_bias (bool or str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Defaults to "auto". conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for convolution layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001). act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. Defaults to None. loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. loss_obj (:obj:`ConfigDict` or dict): Config of objectness loss. loss_l1 (:obj:`ConfigDict` or dict): Config of L1 loss. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of anchor head. Defaults to None. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of anchor head. Defaults to None. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or list[dict], optional): Initialization config dict. Defaults to None. 
""" def __init__( self, num_classes: int, in_channels: int, feat_channels: int = 256, stacked_convs: int = 2, strides: Sequence[int] = (8, 16, 32), use_depthwise: bool = False, dcn_on_last_conv: bool = False, conv_bias: Union[bool, str] = 'auto', conv_cfg: OptConfigType = None, norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001), act_cfg: ConfigType = dict(type='Swish'), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, reduction='sum', loss_weight=1.0), loss_bbox: ConfigType = dict( type='IoULoss', mode='square', eps=1e-16, reduction='sum', loss_weight=5.0), loss_obj: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, reduction='sum', loss_weight=1.0), loss_l1: ConfigType = dict( type='L1Loss', reduction='sum', loss_weight=1.0), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: OptMultiConfig = dict( type='Kaiming', layer='Conv2d', a=math.sqrt(5), distribution='uniform', mode='fan_in', nonlinearity='leaky_relu') ) -> None: super().__init__(init_cfg=init_cfg) self.num_classes = num_classes self.cls_out_channels = num_classes self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.use_depthwise = use_depthwise self.dcn_on_last_conv = dcn_on_last_conv assert conv_bias == 'auto' or isinstance(conv_bias, bool) self.conv_bias = conv_bias self.use_sigmoid_cls = True self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.loss_cls: nn.Module = MODELS.build(loss_cls) self.loss_bbox: nn.Module = MODELS.build(loss_bbox) self.loss_obj: nn.Module = MODELS.build(loss_obj) self.use_l1 = False # This flag will be modified by hooks. self.loss_l1: nn.Module = MODELS.build(loss_l1) self.prior_generator = MlvlPointGenerator(strides, offset=0) self.test_cfg = test_cfg self.train_cfg = train_cfg if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) # YOLOX does not support sampling self.sampler = PseudoSampler() self._init_layers() def _init_layers(self) -> None: """Initialize heads for all level feature maps.""" self.multi_level_cls_convs = nn.ModuleList() self.multi_level_reg_convs = nn.ModuleList() self.multi_level_conv_cls = nn.ModuleList() self.multi_level_conv_reg = nn.ModuleList() self.multi_level_conv_obj = nn.ModuleList() for _ in self.strides: self.multi_level_cls_convs.append(self._build_stacked_convs()) self.multi_level_reg_convs.append(self._build_stacked_convs()) conv_cls, conv_reg, conv_obj = self._build_predictor() self.multi_level_conv_cls.append(conv_cls) self.multi_level_conv_reg.append(conv_reg) self.multi_level_conv_obj.append(conv_obj) def _build_stacked_convs(self) -> nn.Sequential: """Initialize conv layers of a single level head.""" conv = DepthwiseSeparableConvModule \ if self.use_depthwise else ConvModule stacked_convs = [] for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg stacked_convs.append( conv( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, bias=self.conv_bias)) return nn.Sequential(*stacked_convs) def _build_predictor(self) -> Tuple[nn.Module, nn.Module, nn.Module]: """Initialize predictor layers of a single level head.""" conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1) conv_reg = nn.Conv2d(self.feat_channels, 4, 1) conv_obj = 
nn.Conv2d(self.feat_channels, 1, 1) return conv_cls, conv_reg, conv_obj def init_weights(self) -> None: """Initialize weights of the head.""" super(YOLOXHead, self).init_weights() # Use prior in model initialization to improve stability bias_init = bias_init_with_prob(0.01) for conv_cls, conv_obj in zip(self.multi_level_conv_cls, self.multi_level_conv_obj): conv_cls.bias.data.fill_(bias_init) conv_obj.bias.data.fill_(bias_init) def forward_single(self, x: Tensor, cls_convs: nn.Module, reg_convs: nn.Module, conv_cls: nn.Module, conv_reg: nn.Module, conv_obj: nn.Module) -> Tuple[Tensor, Tensor, Tensor]: """Forward feature of a single scale level.""" cls_feat = cls_convs(x) reg_feat = reg_convs(x) cls_score = conv_cls(cls_feat) bbox_pred = conv_reg(reg_feat) objectness = conv_obj(reg_feat) return cls_score, bbox_pred, objectness def forward(self, x: Tuple[Tensor]) -> Tuple[List]: """Forward features from the upstream network. Args: x (Tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: Tuple[List]: A tuple of multi-level classification scores, bbox predictions, and objectnesses. """ return multi_apply(self.forward_single, x, self.multi_level_cls_convs, self.multi_level_reg_convs, self.multi_level_conv_cls, self.multi_level_conv_reg, self.multi_level_conv_obj) def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], objectnesses: Optional[List[Tensor]], batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> List[InstanceData]: """Transform a batch of output features extracted by the head into bbox results. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" assert len(cls_scores) == len(bbox_preds) == len(objectnesses) cfg = self.test_cfg if cfg is None else cfg num_imgs = len(batch_img_metas) featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device, with_stride=True) # flatten cls_scores, bbox_preds and objectness flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_objectness = [ objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) for objectness in objectnesses ] flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() flatten_priors = torch.cat(mlvl_priors) flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) result_list = [] for img_id, img_meta in enumerate(batch_img_metas): max_scores, labels = torch.max(flatten_cls_scores[img_id], 1) valid_mask = flatten_objectness[ img_id] * max_scores >= cfg.score_thr results = InstanceData( bboxes=flatten_bboxes[img_id][valid_mask], scores=max_scores[valid_mask] * flatten_objectness[img_id][valid_mask], labels=labels[valid_mask]) result_list.append( self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta)) return result_list def _bbox_decode(self, priors: Tensor, bbox_preds: Tensor) -> Tensor: """Decode regression results (delta_x, delta_x, w, h) to bboxes (tl_x, tl_y, br_x, br_y). Args: priors (Tensor): Center proiors of an image, has shape (num_instances, 2). bbox_preds (Tensor): Box energies / deltas for all instances, has shape (batch_size, num_instances, 4). Returns: Tensor: Decoded bboxes in (tl_x, tl_y, br_x, br_y) format. Has shape (batch_size, num_instances, 4). """ xys = (bbox_preds[..., :2] * priors[:, 2:]) + priors[:, :2] whs = bbox_preds[..., 2:].exp() * priors[:, 2:] tl_x = (xys[..., 0] - whs[..., 0] / 2) tl_y = (xys[..., 1] - whs[..., 1] / 2) br_x = (xys[..., 0] + whs[..., 0] / 2) br_y = (xys[..., 1] + whs[..., 1] / 2) decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1) return decoded_bboxes def _bbox_post_process(self, results: InstanceData, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True, img_meta: Optional[dict] = None) -> InstanceData: """bbox post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Usually `with_nms` is False is used for aug test. Args: results (:obj:`InstaceData`): Detection instance results, each item has shape (num_bboxes, ). cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default to False. with_nms (bool): If True, do nms before return boxes. Default to True. img_meta (dict, optional): Image meta info. Defaults to None. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" if rescale: assert img_meta.get('scale_factor') is not None results.bboxes /= results.bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) if with_nms and results.bboxes.numel() > 0: det_bboxes, keep_idxs = batched_nms(results.bboxes, results.scores, results.labels, cfg.nms) results = results[keep_idxs] # some nms would reweight the score, such as softnms results.scores = det_bboxes[:, -1] return results def loss_by_feat( self, cls_scores: Sequence[Tensor], bbox_preds: Sequence[Tensor], objectnesses: Sequence[Tensor], batch_gt_instances: Sequence[InstanceData], batch_img_metas: Sequence[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (Sequence[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_priors * num_classes. bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_priors * 4. objectnesses (Sequence[Tensor]): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of losses. """ num_imgs = len(batch_img_metas) if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device, with_stride=True) flatten_cls_preds = [ cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_pred in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_objectness = [ objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) for objectness in objectnesses ] flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) flatten_objectness = torch.cat(flatten_objectness, dim=1) flatten_priors = torch.cat(mlvl_priors) flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) (pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets, num_fg_imgs) = multi_apply( self._get_targets_single, flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1), flatten_cls_preds.detach(), flatten_bboxes.detach(), flatten_objectness.detach(), batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) # The experimental results show that 'reduce_mean' can improve # performance on the COCO dataset. 
num_pos = torch.tensor( sum(num_fg_imgs), dtype=torch.float, device=flatten_cls_preds.device) num_total_samples = max(reduce_mean(num_pos), 1.0) pos_masks = torch.cat(pos_masks, 0) cls_targets = torch.cat(cls_targets, 0) obj_targets = torch.cat(obj_targets, 0) bbox_targets = torch.cat(bbox_targets, 0) if self.use_l1: l1_targets = torch.cat(l1_targets, 0) loss_obj = self.loss_obj(flatten_objectness.view(-1, 1), obj_targets) / num_total_samples if num_pos > 0: loss_cls = self.loss_cls( flatten_cls_preds.view(-1, self.num_classes)[pos_masks], cls_targets) / num_total_samples loss_bbox = self.loss_bbox( flatten_bboxes.view(-1, 4)[pos_masks], bbox_targets) / num_total_samples else: # Avoid cls and reg branch not participating in the gradient # propagation when there is no ground-truth in the images. # For more details, please refer to # https://github.com/open-mmlab/mmdetection/issues/7298 loss_cls = flatten_cls_preds.sum() * 0 loss_bbox = flatten_bboxes.sum() * 0 loss_dict = dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj) if self.use_l1: if num_pos > 0: loss_l1 = self.loss_l1( flatten_bbox_preds.view(-1, 4)[pos_masks], l1_targets) / num_total_samples else: # Avoid cls and reg branch not participating in the gradient # propagation when there is no ground-truth in the images. # For more details, please refer to # https://github.com/open-mmlab/mmdetection/issues/7298 loss_l1 = flatten_bbox_preds.sum() * 0 loss_dict.update(loss_l1=loss_l1) return loss_dict @torch.no_grad() def _get_targets_single( self, priors: Tensor, cls_preds: Tensor, decoded_bboxes: Tensor, objectness: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None) -> tuple: """Compute classification, regression, and objectness targets for priors in a single image. Args: priors (Tensor): All priors of one image, a 2D-Tensor with shape [num_priors, 4] in [cx, xy, stride_w, stride_y] format. cls_preds (Tensor): Classification predictions of one image, a 2D-Tensor with shape [num_priors, num_classes] decoded_bboxes (Tensor): Decoded bboxes predictions of one image, a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. objectness (Tensor): Objectness predictions of one image, a 1D-Tensor with shape [num_priors] gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: tuple: foreground_mask (list[Tensor]): Binary mask of foreground targets. cls_target (list[Tensor]): Classification targets of an image. obj_target (list[Tensor]): Objectness targets of an image. bbox_target (list[Tensor]): BBox targets of an image. l1_target (int): BBox L1 targets of an image. num_pos_per_img (int): Number of positive samples in an image. 
""" num_priors = priors.size(0) num_gts = len(gt_instances) # No target if num_gts == 0: cls_target = cls_preds.new_zeros((0, self.num_classes)) bbox_target = cls_preds.new_zeros((0, 4)) l1_target = cls_preds.new_zeros((0, 4)) obj_target = cls_preds.new_zeros((num_priors, 1)) foreground_mask = cls_preds.new_zeros(num_priors).bool() return (foreground_mask, cls_target, obj_target, bbox_target, l1_target, 0) # YOLOX uses center priors with 0.5 offset to assign targets, # but use center priors without offset to regress bboxes. offset_priors = torch.cat( [priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1) scores = cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid() pred_instances = InstanceData( bboxes=decoded_bboxes, scores=scores.sqrt_(), priors=offset_priors) assign_result = self.assigner.assign( pred_instances=pred_instances, gt_instances=gt_instances, gt_instances_ignore=gt_instances_ignore) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) pos_inds = sampling_result.pos_inds num_pos_per_img = pos_inds.size(0) pos_ious = assign_result.max_overlaps[pos_inds] # IOU aware classification score cls_target = F.one_hot(sampling_result.pos_gt_labels, self.num_classes) * pos_ious.unsqueeze(-1) obj_target = torch.zeros_like(objectness).unsqueeze(-1) obj_target[pos_inds] = 1 bbox_target = sampling_result.pos_gt_bboxes l1_target = cls_preds.new_zeros((num_pos_per_img, 4)) if self.use_l1: l1_target = self._get_l1_target(l1_target, bbox_target, priors[pos_inds]) foreground_mask = torch.zeros_like(objectness).to(torch.bool) foreground_mask[pos_inds] = 1 return (foreground_mask, cls_target, obj_target, bbox_target, l1_target, num_pos_per_img) def _get_l1_target(self, l1_target: Tensor, gt_bboxes: Tensor, priors: Tensor, eps: float = 1e-8) -> Tensor: """Convert gt bboxes to center offset and log width height.""" gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes) l1_target[:, :2] = (gt_cxcywh[:, :2] - priors[:, :2]) / priors[:, 2:] l1_target[:, 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps) return l1_target
26,925
42.499192
79
py
ERD
ERD-main/mmdet/models/dense_heads/retina_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.cnn import ConvModule from mmdet.registry import MODELS from .anchor_head import AnchorHead @MODELS.register_module() class RetinaHead(AnchorHead): r"""An anchor-based head used in `RetinaNet <https://arxiv.org/pdf/1708.02002.pdf>`_. The head contains two subnetworks. The first classifies anchor boxes and the second regresses deltas for the anchors. Example: >>> import torch >>> self = RetinaHead(11, 7) >>> x = torch.rand(1, 7, 32, 32) >>> cls_score, bbox_pred = self.forward_single(x) >>> # Each anchor predicts a score for each class except background >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors >>> assert cls_per_anchor == (self.num_classes) >>> assert box_per_anchor == 4 """ def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), init_cfg=dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01)), **kwargs): assert stacked_convs >= 0, \ '`stacked_convs` must be non-negative integers, ' \ f'but got {stacked_convs} instead.' self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(RetinaHead, self).__init__( num_classes, in_channels, anchor_generator=anchor_generator, init_cfg=init_cfg, **kwargs) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() in_channels = self.in_channels for i in range(self.stacked_convs): self.cls_convs.append( ConvModule( in_channels, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( in_channels, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) in_channels = self.feat_channels self.retina_cls = nn.Conv2d( in_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) reg_dim = self.bbox_coder.encode_size self.retina_reg = nn.Conv2d( in_channels, self.num_base_priors * reg_dim, 3, padding=1) def forward_single(self, x): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level the channels number is num_anchors * num_classes. bbox_pred (Tensor): Box energies / deltas for a single scale level, the channels number is num_anchors * 4. """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_pred = self.retina_reg(reg_feat) return cls_score, bbox_pred
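With the default anchor generator above (3 scales per octave × 3 aspect ratios), each location carries 9 base anchors, which fixes the output channel counts of `retina_cls` and `retina_reg`. A back-of-the-envelope check (plain arithmetic, no mmdet imports; `num_classes = 80` is an assumed COCO setting and sigmoid-based classification is assumed):

# default RetinaHead anchor generator: 3 scales per octave x 3 ratios
scales_per_octave = 3
ratios = [0.5, 1.0, 2.0]
num_base_priors = scales_per_octave * len(ratios)   # 9 anchors per location

num_classes = 80                                     # assumed, e.g. COCO
cls_out_channels = num_classes                       # sigmoid-based classification assumed
reg_dim = 4                                          # delta-xywh box coder

cls_channels = num_base_priors * cls_out_channels    # 720 output channels for retina_cls
reg_channels = num_base_priors * reg_dim             # 36 output channels for retina_reg
print(cls_channels, reg_channels)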
4,284
34.413223
76
py
ERD
ERD-main/mmdet/models/dense_heads/rtmdet_ins_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import math from typing import List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, is_norm from mmcv.ops import batched_nms from mmengine.model import (BaseModule, bias_init_with_prob, constant_init, normal_init) from mmengine.structures import InstanceData from torch import Tensor from mmdet.models.layers.transformer import inverse_sigmoid from mmdet.models.utils import (filter_scores_and_topk, multi_apply, select_single_mlvl, sigmoid_geometric_mean) from mmdet.registry import MODELS from mmdet.structures.bbox import (cat_boxes, distance2bbox, get_box_tensor, get_box_wh, scale_boxes) from mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean from .rtmdet_head import RTMDetHead @MODELS.register_module() class RTMDetInsHead(RTMDetHead): """Detection Head of RTMDet-Ins. Args: num_prototypes (int): Number of mask prototype features extracted from the mask head. Defaults to 8. dyconv_channels (int): Channel of the dynamic conv layers. Defaults to 8. num_dyconvs (int): Number of the dynamic convolution layers. Defaults to 3. mask_loss_stride (int): Down sample stride of the masks for loss computation. Defaults to 4. loss_mask (:obj:`ConfigDict` or dict): Config dict for mask loss. """ def __init__(self, *args, num_prototypes: int = 8, dyconv_channels: int = 8, num_dyconvs: int = 3, mask_loss_stride: int = 4, loss_mask=dict( type='DiceLoss', loss_weight=2.0, eps=5e-6, reduction='mean'), **kwargs) -> None: self.num_prototypes = num_prototypes self.num_dyconvs = num_dyconvs self.dyconv_channels = dyconv_channels self.mask_loss_stride = mask_loss_stride super().__init__(*args, **kwargs) self.loss_mask = MODELS.build(loss_mask) def _init_layers(self) -> None: """Initialize layers of the head.""" super()._init_layers() # a branch to predict kernels of dynamic convs self.kernel_convs = nn.ModuleList() # calculate num dynamic parameters weight_nums, bias_nums = [], [] for i in range(self.num_dyconvs): if i == 0: weight_nums.append( # mask prototype and coordinate features (self.num_prototypes + 2) * self.dyconv_channels) bias_nums.append(self.dyconv_channels * 1) elif i == self.num_dyconvs - 1: weight_nums.append(self.dyconv_channels * 1) bias_nums.append(1) else: weight_nums.append(self.dyconv_channels * self.dyconv_channels) bias_nums.append(self.dyconv_channels * 1) self.weight_nums = weight_nums self.bias_nums = bias_nums self.num_gen_params = sum(weight_nums) + sum(bias_nums) for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.kernel_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) pred_pad_size = self.pred_kernel_size // 2 self.rtm_kernel = nn.Conv2d( self.feat_channels, self.num_gen_params, self.pred_kernel_size, padding=pred_pad_size) self.mask_head = MaskFeatModule( in_channels=self.in_channels, feat_channels=self.feat_channels, stacked_convs=4, num_levels=len(self.prior_generator.strides), num_prototypes=self.num_prototypes, act_cfg=self.act_cfg, norm_cfg=self.norm_cfg) def forward(self, feats: Tuple[Tensor, ...]) -> tuple: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. 
Returns: tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. - kernel_preds (list[Tensor]): Dynamic conv kernels for all scale levels, each is a 4D-tensor, the channels number is num_gen_params. - mask_feat (Tensor): Output feature of the mask head. Each is a 4D-tensor, the channels number is num_prototypes. """ mask_feat = self.mask_head(feats) cls_scores = [] bbox_preds = [] kernel_preds = [] for idx, (x, scale, stride) in enumerate( zip(feats, self.scales, self.prior_generator.strides)): cls_feat = x reg_feat = x kernel_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.rtm_cls(cls_feat) for kernel_layer in self.kernel_convs: kernel_feat = kernel_layer(kernel_feat) kernel_pred = self.rtm_kernel(kernel_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) if self.with_objectness: objectness = self.rtm_obj(reg_feat) cls_score = inverse_sigmoid( sigmoid_geometric_mean(cls_score, objectness)) reg_dist = scale(self.rtm_reg(reg_feat)) * stride[0] cls_scores.append(cls_score) bbox_preds.append(reg_dist) kernel_preds.append(kernel_pred) return tuple(cls_scores), tuple(bbox_preds), tuple( kernel_preds), mask_feat def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], kernel_preds: List[Tensor], mask_feat: Tensor, score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigType] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). kernel_preds (list[Tensor]): Kernel predictions of dynamic convs for all scale levels, each is a 4D-tensor, has shape (batch_size, num_params, H, W). mask_feat (Tensor): Mask prototype features extracted from the mask head, has shape (batch_size, num_prototypes, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
- masks (Tensor): Has a shape (num_instances, h, w). """ assert len(cls_scores) == len(bbox_preds) if score_factors is None: # e.g. Retina, FreeAnchor, Foveabox, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, AutoAssign, etc. with_score_factors = True assert len(cls_scores) == len(score_factors) num_levels = len(cls_scores) featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device, with_stride=True) result_list = [] for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] cls_score_list = select_single_mlvl( cls_scores, img_id, detach=True) bbox_pred_list = select_single_mlvl( bbox_preds, img_id, detach=True) kernel_pred_list = select_single_mlvl( kernel_preds, img_id, detach=True) if with_score_factors: score_factor_list = select_single_mlvl( score_factors, img_id, detach=True) else: score_factor_list = [None for _ in range(num_levels)] results = self._predict_by_feat_single( cls_score_list=cls_score_list, bbox_pred_list=bbox_pred_list, kernel_pred_list=kernel_pred_list, mask_feat=mask_feat[img_id], score_factor_list=score_factor_list, mlvl_priors=mlvl_priors, img_meta=img_meta, cfg=cfg, rescale=rescale, with_nms=with_nms) result_list.append(results) return result_list def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], kernel_pred_list: List[Tensor], mask_feat: Tensor, score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: ConfigType, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox and mask results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). kernel_preds (list[Tensor]): Kernel predictions of dynamic convs for all scale levels of a single image, each is a 4D-tensor, has shape (num_params, H, W). mask_feat (Tensor): Mask prototype features of a single image extracted from the mask head, has shape (num_prototypes, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, h, w). """ if score_factor_list[0] is None: # e.g. Retina, FreeAnchor, etc. with_score_factors = False else: # e.g. 
FCOS, PAA, ATSS, etc. with_score_factors = True cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bbox_preds = [] mlvl_kernels = [] mlvl_valid_priors = [] mlvl_scores = [] mlvl_labels = [] if with_score_factors: mlvl_score_factors = [] else: mlvl_score_factors = None for level_idx, (cls_score, bbox_pred, kernel_pred, score_factor, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, kernel_pred_list, score_factor_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] dim = self.bbox_coder.encode_size bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim) if with_score_factors: score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) kernel_pred = kernel_pred.permute(1, 2, 0).reshape( -1, self.num_gen_params) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class scores = cls_score.softmax(-1)[:, :-1] # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. score_thr = cfg.get('score_thr', 0) results = filter_scores_and_topk( scores, score_thr, nms_pre, dict( bbox_pred=bbox_pred, priors=priors, kernel_pred=kernel_pred)) scores, labels, keep_idxs, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] kernel_pred = filtered_results['kernel_pred'] if with_score_factors: score_factor = score_factor[keep_idxs] mlvl_bbox_preds.append(bbox_pred) mlvl_valid_priors.append(priors) mlvl_scores.append(scores) mlvl_labels.append(labels) mlvl_kernels.append(kernel_pred) if with_score_factors: mlvl_score_factors.append(score_factor) bbox_pred = torch.cat(mlvl_bbox_preds) priors = cat_boxes(mlvl_valid_priors) bboxes = self.bbox_coder.decode( priors[..., :2], bbox_pred, max_shape=img_shape) results = InstanceData() results.bboxes = bboxes results.priors = priors results.scores = torch.cat(mlvl_scores) results.labels = torch.cat(mlvl_labels) results.kernels = torch.cat(mlvl_kernels) if with_score_factors: results.score_factors = torch.cat(mlvl_score_factors) return self._bbox_mask_post_process( results=results, mask_feat=mask_feat, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) def _bbox_mask_post_process( self, results: InstanceData, mask_feat, cfg: ConfigType, rescale: bool = False, with_nms: bool = True, img_meta: Optional[dict] = None) -> InstanceData: """bbox and mask post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Usually `with_nms` is False is used for aug test. Args: results (:obj:`InstaceData`): Detection instance results, each item has shape (num_bboxes, ). cfg (ConfigDict): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default to False. with_nms (bool): If True, do nms before return boxes. Default to True. img_meta (dict, optional): Image meta info. Defaults to None. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. 
- scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, h, w). """ stride = self.prior_generator.strides[0][0] if rescale: assert img_meta.get('scale_factor') is not None scale_factor = [1 / s for s in img_meta['scale_factor']] results.bboxes = scale_boxes(results.bboxes, scale_factor) if hasattr(results, 'score_factors'): # TODO: Add sqrt operation in order to be consistent with # the paper. score_factors = results.pop('score_factors') results.scores = results.scores * score_factors # filter small size bboxes if cfg.get('min_bbox_size', -1) >= 0: w, h = get_box_wh(results.bboxes) valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) if not valid_mask.all(): results = results[valid_mask] # TODO: deal with `with_nms` and `nms_cfg=None` in test_cfg assert with_nms, 'with_nms must be True for RTMDet-Ins' if results.bboxes.numel() > 0: bboxes = get_box_tensor(results.bboxes) det_bboxes, keep_idxs = batched_nms(bboxes, results.scores, results.labels, cfg.nms) results = results[keep_idxs] # some nms would reweight the score, such as softnms results.scores = det_bboxes[:, -1] results = results[:cfg.max_per_img] # process masks mask_logits = self._mask_predict_by_feat_single( mask_feat, results.kernels, results.priors) mask_logits = F.interpolate( mask_logits.unsqueeze(0), scale_factor=stride, mode='bilinear') if rescale: ori_h, ori_w = img_meta['ori_shape'][:2] mask_logits = F.interpolate( mask_logits, size=[ math.ceil(mask_logits.shape[-2] * scale_factor[0]), math.ceil(mask_logits.shape[-1] * scale_factor[1]) ], mode='bilinear', align_corners=False)[..., :ori_h, :ori_w] masks = mask_logits.sigmoid().squeeze(0) masks = masks > cfg.mask_thr_binary results.masks = masks else: h, w = img_meta['ori_shape'][:2] if rescale else img_meta[ 'img_shape'][:2] results.masks = torch.zeros( size=(results.bboxes.shape[0], h, w), dtype=torch.bool, device=results.bboxes.device) return results def parse_dynamic_params(self, flatten_kernels: Tensor) -> tuple: """split kernel head prediction to conv weight and bias.""" n_inst = flatten_kernels.size(0) n_layers = len(self.weight_nums) params_splits = list( torch.split_with_sizes( flatten_kernels, self.weight_nums + self.bias_nums, dim=1)) weight_splits = params_splits[:n_layers] bias_splits = params_splits[n_layers:] for i in range(n_layers): if i < n_layers - 1: weight_splits[i] = weight_splits[i].reshape( n_inst * self.dyconv_channels, -1, 1, 1) bias_splits[i] = bias_splits[i].reshape(n_inst * self.dyconv_channels) else: weight_splits[i] = weight_splits[i].reshape(n_inst, -1, 1, 1) bias_splits[i] = bias_splits[i].reshape(n_inst) return weight_splits, bias_splits def _mask_predict_by_feat_single(self, mask_feat: Tensor, kernels: Tensor, priors: Tensor) -> Tensor: """Generate mask logits from mask features with dynamic convs. Args: mask_feat (Tensor): Mask prototype features. Has shape (num_prototypes, H, W). kernels (Tensor): Kernel parameters for each instance. Has shape (num_instance, num_params) priors (Tensor): Center priors for each instance. Has shape (num_instance, 4). Returns: Tensor: Instance segmentation masks for each instance. Has shape (num_instance, H, W). 
""" num_inst = priors.shape[0] h, w = mask_feat.size()[-2:] if num_inst < 1: return torch.empty( size=(num_inst, h, w), dtype=mask_feat.dtype, device=mask_feat.device) if len(mask_feat.shape) < 4: mask_feat.unsqueeze(0) coord = self.prior_generator.single_level_grid_priors( (h, w), level_idx=0, device=mask_feat.device).reshape(1, -1, 2) num_inst = priors.shape[0] points = priors[:, :2].reshape(-1, 1, 2) strides = priors[:, 2:].reshape(-1, 1, 2) relative_coord = (points - coord).permute(0, 2, 1) / ( strides[..., 0].reshape(-1, 1, 1) * 8) relative_coord = relative_coord.reshape(num_inst, 2, h, w) mask_feat = torch.cat( [relative_coord, mask_feat.repeat(num_inst, 1, 1, 1)], dim=1) weights, biases = self.parse_dynamic_params(kernels) n_layers = len(weights) x = mask_feat.reshape(1, -1, h, w) for i, (weight, bias) in enumerate(zip(weights, biases)): x = F.conv2d( x, weight, bias=bias, stride=1, padding=0, groups=num_inst) if i < n_layers - 1: x = F.relu(x) x = x.reshape(num_inst, h, w) return x def loss_mask_by_feat(self, mask_feats: Tensor, flatten_kernels: Tensor, sampling_results_list: list, batch_gt_instances: InstanceList) -> Tensor: """Compute instance segmentation loss. Args: mask_feats (list[Tensor]): Mask prototype features extracted from the mask head. Has shape (N, num_prototypes, H, W) flatten_kernels (list[Tensor]): Kernels of the dynamic conv layers. Has shape (N, num_instances, num_params) sampling_results_list (list[:obj:`SamplingResults`]) Batch of assignment results. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: Tensor: The mask loss tensor. """ batch_pos_mask_logits = [] pos_gt_masks = [] for idx, (mask_feat, kernels, sampling_results, gt_instances) in enumerate( zip(mask_feats, flatten_kernels, sampling_results_list, batch_gt_instances)): pos_priors = sampling_results.pos_priors pos_inds = sampling_results.pos_inds pos_kernels = kernels[pos_inds] # n_pos, num_gen_params pos_mask_logits = self._mask_predict_by_feat_single( mask_feat, pos_kernels, pos_priors) if gt_instances.masks.numel() == 0: gt_masks = torch.empty_like(gt_instances.masks) else: gt_masks = gt_instances.masks[ sampling_results.pos_assigned_gt_inds, :] batch_pos_mask_logits.append(pos_mask_logits) pos_gt_masks.append(gt_masks) pos_gt_masks = torch.cat(pos_gt_masks, 0) batch_pos_mask_logits = torch.cat(batch_pos_mask_logits, 0) # avg_factor num_pos = batch_pos_mask_logits.shape[0] num_pos = reduce_mean(mask_feats.new_tensor([num_pos ])).clamp_(min=1).item() if batch_pos_mask_logits.shape[0] == 0: return mask_feats.sum() * 0 scale = self.prior_generator.strides[0][0] // self.mask_loss_stride # upsample pred masks batch_pos_mask_logits = F.interpolate( batch_pos_mask_logits.unsqueeze(0), scale_factor=scale, mode='bilinear', align_corners=False).squeeze(0) # downsample gt masks pos_gt_masks = pos_gt_masks[:, self.mask_loss_stride // 2::self.mask_loss_stride, self.mask_loss_stride // 2::self.mask_loss_stride] loss_mask = self.loss_mask( batch_pos_mask_logits, pos_gt_masks, weight=None, avg_factor=num_pos) return loss_mask def loss_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], kernel_preds: List[Tensor], mask_feat: Tensor, batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None): """Compute losses of the head. 
Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Decoded box for each scale level with shape (N, num_anchors * 4, H, W) in [tl_x, tl_y, br_x, br_y] format. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = len(batch_img_metas) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) flatten_cls_scores = torch.cat([ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_score in cls_scores ], 1) flatten_kernels = torch.cat([ kernel_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_gen_params) for kernel_pred in kernel_preds ], 1) decoded_bboxes = [] for anchor, bbox_pred in zip(anchor_list[0], bbox_preds): anchor = anchor.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) bbox_pred = distance2bbox(anchor, bbox_pred) decoded_bboxes.append(bbox_pred) flatten_bboxes = torch.cat(decoded_bboxes, 1) for gt_instances in batch_gt_instances: gt_instances.masks = gt_instances.masks.to_tensor( dtype=torch.bool, device=device) cls_reg_targets = self.get_targets( flatten_cls_scores, flatten_bboxes, anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (anchor_list, labels_list, label_weights_list, bbox_targets_list, assign_metrics_list, sampling_results_list) = cls_reg_targets losses_cls, losses_bbox,\ cls_avg_factors, bbox_avg_factors = multi_apply( self.loss_by_feat_single, cls_scores, decoded_bboxes, labels_list, label_weights_list, bbox_targets_list, assign_metrics_list, self.prior_generator.strides) cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item() losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls)) bbox_avg_factor = reduce_mean( sum(bbox_avg_factors)).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) loss_mask = self.loss_mask_by_feat(mask_feat, flatten_kernels, sampling_results_list, batch_gt_instances) loss = dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_mask=loss_mask) return loss class MaskFeatModule(BaseModule): """Mask feature head used in RTMDet-Ins. Args: in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels of the mask feature map branch. num_levels (int): The starting feature map level from RPN that will be used to predict the mask feature map. num_prototypes (int): Number of output channel of the mask feature map branch. This is the channel count of the mask feature map that to be dynamically convolved with the predicted kernel. stacked_convs (int): Number of convs in mask feature branch. act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. Default: dict(type='ReLU', inplace=True) norm_cfg (dict): Config dict for normalization layer. Default: None. 
""" def __init__( self, in_channels: int, feat_channels: int = 256, stacked_convs: int = 4, num_levels: int = 3, num_prototypes: int = 8, act_cfg: ConfigType = dict(type='ReLU', inplace=True), norm_cfg: ConfigType = dict(type='BN') ) -> None: super().__init__(init_cfg=None) self.num_levels = num_levels self.fusion_conv = nn.Conv2d(num_levels * in_channels, in_channels, 1) convs = [] for i in range(stacked_convs): in_c = in_channels if i == 0 else feat_channels convs.append( ConvModule( in_c, feat_channels, 3, padding=1, act_cfg=act_cfg, norm_cfg=norm_cfg)) self.stacked_convs = nn.Sequential(*convs) self.projection = nn.Conv2d( feat_channels, num_prototypes, kernel_size=1) def forward(self, features: Tuple[Tensor, ...]) -> Tensor: # multi-level feature fusion fusion_feats = [features[0]] size = features[0].shape[-2:] for i in range(1, self.num_levels): f = F.interpolate(features[i], size=size, mode='bilinear') fusion_feats.append(f) fusion_feats = torch.cat(fusion_feats, dim=1) fusion_feats = self.fusion_conv(fusion_feats) # pred mask feats mask_features = self.stacked_convs(fusion_feats) mask_features = self.projection(mask_features) return mask_features @MODELS.register_module() class RTMDetInsSepBNHead(RTMDetInsHead): """Detection Head of RTMDet-Ins with sep-bn layers. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. share_conv (bool): Whether to share conv layers between stages. Defaults to True. norm_cfg (:obj:`ConfigDict` or dict)): Config dict for normalization layer. Defaults to dict(type='BN'). act_cfg (:obj:`ConfigDict` or dict)): Config dict for activation layer. Defaults to dict(type='SiLU', inplace=True). pred_kernel_size (int): Kernel size of prediction layer. Defaults to 1. 
""" def __init__(self, num_classes: int, in_channels: int, share_conv: bool = True, with_objectness: bool = False, norm_cfg: ConfigType = dict(type='BN', requires_grad=True), act_cfg: ConfigType = dict(type='SiLU', inplace=True), pred_kernel_size: int = 1, **kwargs) -> None: self.share_conv = share_conv super().__init__( num_classes, in_channels, norm_cfg=norm_cfg, act_cfg=act_cfg, pred_kernel_size=pred_kernel_size, with_objectness=with_objectness, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() self.kernel_convs = nn.ModuleList() self.rtm_cls = nn.ModuleList() self.rtm_reg = nn.ModuleList() self.rtm_kernel = nn.ModuleList() self.rtm_obj = nn.ModuleList() # calculate num dynamic parameters weight_nums, bias_nums = [], [] for i in range(self.num_dyconvs): if i == 0: weight_nums.append( (self.num_prototypes + 2) * self.dyconv_channels) bias_nums.append(self.dyconv_channels) elif i == self.num_dyconvs - 1: weight_nums.append(self.dyconv_channels) bias_nums.append(1) else: weight_nums.append(self.dyconv_channels * self.dyconv_channels) bias_nums.append(self.dyconv_channels) self.weight_nums = weight_nums self.bias_nums = bias_nums self.num_gen_params = sum(weight_nums) + sum(bias_nums) pred_pad_size = self.pred_kernel_size // 2 for n in range(len(self.prior_generator.strides)): cls_convs = nn.ModuleList() reg_convs = nn.ModuleList() kernel_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) kernel_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) self.cls_convs.append(cls_convs) self.reg_convs.append(cls_convs) self.kernel_convs.append(kernel_convs) self.rtm_cls.append( nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, self.pred_kernel_size, padding=pred_pad_size)) self.rtm_reg.append( nn.Conv2d( self.feat_channels, self.num_base_priors * 4, self.pred_kernel_size, padding=pred_pad_size)) self.rtm_kernel.append( nn.Conv2d( self.feat_channels, self.num_gen_params, self.pred_kernel_size, padding=pred_pad_size)) if self.with_objectness: self.rtm_obj.append( nn.Conv2d( self.feat_channels, 1, self.pred_kernel_size, padding=pred_pad_size)) if self.share_conv: for n in range(len(self.prior_generator.strides)): for i in range(self.stacked_convs): self.cls_convs[n][i].conv = self.cls_convs[0][i].conv self.reg_convs[n][i].conv = self.reg_convs[0][i].conv self.mask_head = MaskFeatModule( in_channels=self.in_channels, feat_channels=self.feat_channels, stacked_convs=4, num_levels=len(self.prior_generator.strides), num_prototypes=self.num_prototypes, act_cfg=self.act_cfg, norm_cfg=self.norm_cfg) def init_weights(self) -> None: """Initialize weights of the head.""" for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) bias_cls = bias_init_with_prob(0.01) for rtm_cls, rtm_reg, rtm_kernel in zip(self.rtm_cls, self.rtm_reg, self.rtm_kernel): normal_init(rtm_cls, std=0.01, bias=bias_cls) normal_init(rtm_reg, std=0.01, bias=1) if self.with_objectness: for rtm_obj in 
self.rtm_obj: normal_init(rtm_obj, std=0.01, bias=bias_cls) def forward(self, feats: Tuple[Tensor, ...]) -> tuple: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. - kernel_preds (list[Tensor]): Dynamic conv kernels for all scale levels, each is a 4D-tensor, the channels number is num_gen_params. - mask_feat (Tensor): Output feature of the mask head. Each is a 4D-tensor, the channels number is num_prototypes. """ mask_feat = self.mask_head(feats) cls_scores = [] bbox_preds = [] kernel_preds = [] for idx, (x, stride) in enumerate( zip(feats, self.prior_generator.strides)): cls_feat = x reg_feat = x kernel_feat = x for cls_layer in self.cls_convs[idx]: cls_feat = cls_layer(cls_feat) cls_score = self.rtm_cls[idx](cls_feat) for kernel_layer in self.kernel_convs[idx]: kernel_feat = kernel_layer(kernel_feat) kernel_pred = self.rtm_kernel[idx](kernel_feat) for reg_layer in self.reg_convs[idx]: reg_feat = reg_layer(reg_feat) if self.with_objectness: objectness = self.rtm_obj[idx](reg_feat) cls_score = inverse_sigmoid( sigmoid_geometric_mean(cls_score, objectness)) reg_dist = F.relu(self.rtm_reg[idx](reg_feat)) * stride[0] cls_scores.append(cls_score) bbox_preds.append(reg_dist) kernel_preds.append(kernel_pred) return tuple(cls_scores), tuple(bbox_preds), tuple( kernel_preds), mask_feat
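# --- Editor's note: illustrative sketch, not part of the original file. ---
# The loop in `_init_layers` above sizes the per-prior dynamic-conv kernels.
# The standalone function below reproduces that bookkeeping so the total
# `num_gen_params` can be checked in isolation; the default values
# (num_prototypes=8, dyconv_channels=8, num_dyconvs=3) are assumptions taken
# from the RTMDet-Ins defaults, not read from any config in this file.


def count_dynamic_conv_params(num_prototypes: int = 8,
                              dyconv_channels: int = 8,
                              num_dyconvs: int = 3) -> int:
    """Return the number of dynamic-conv parameters predicted per prior."""
    weight_nums, bias_nums = [], []
    for i in range(num_dyconvs):
        if i == 0:
            # first layer sees the prototypes plus 2 relative-coordinate maps
            weight_nums.append((num_prototypes + 2) * dyconv_channels)
            bias_nums.append(dyconv_channels)
        elif i == num_dyconvs - 1:
            # last layer projects down to a single mask-logit channel
            weight_nums.append(dyconv_channels)
            bias_nums.append(1)
        else:
            weight_nums.append(dyconv_channels * dyconv_channels)
            bias_nums.append(dyconv_channels)
    return sum(weight_nums) + sum(bias_nums)


# With the assumed defaults this yields 169 parameters per prior.
assert count_dynamic_conv_params() == (8 + 2) * 8 + 8 + 8 * 8 + 8 + 8 + 1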
43,632
41.157488
79
py
ERD
ERD-main/mmdet/models/dense_heads/ga_rpn_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from typing import List, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.ops import nms from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptInstanceList from .guided_anchor_head import GuidedAnchorHead @MODELS.register_module() class GARPNHead(GuidedAnchorHead): """Guided-Anchor-based RPN head.""" def __init__(self, in_channels: int, num_classes: int = 1, init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_loc', std=0.01, bias_prob=0.01)), **kwargs) -> None: super().__init__( num_classes=num_classes, in_channels=in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" self.rpn_conv = nn.Conv2d( self.in_channels, self.feat_channels, 3, padding=1) super(GARPNHead, self)._init_layers() def forward_single(self, x: Tensor) -> Tuple[Tensor]: """Forward feature of a single scale level.""" x = self.rpn_conv(x) x = F.relu(x, inplace=True) (cls_score, bbox_pred, shape_pred, loc_pred) = super().forward_single(x) return cls_score, bbox_pred, shape_pred, loc_pred def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], shape_preds: List[Tensor], loc_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). shape_preds (list[Tensor]): shape predictions for each scale level with shape (N, 1, H, W). loc_preds (list[Tensor]): location predictions for each scale level with shape (N, num_anchors * 2, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. """ losses = super().loss_by_feat( cls_scores, bbox_preds, shape_preds, loc_preds, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) return dict( loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'], loss_anchor_shape=losses['loss_shape'], loss_anchor_loc=losses['loss_loc']) def _predict_by_feat_single(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], mlvl_anchors: List[Tensor], mlvl_masks: List[Tensor], img_meta: dict, cfg: ConfigType, rescale: bool = False) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). mlvl_anchors (list[Tensor]): Each element in the list is the anchors of a single level in feature pyramid. it has shape (num_priors, 4). 
mlvl_masks (list[Tensor]): Each element in the list is location masks of a single level. img_meta (dict): Image meta info. cfg (:obj:`ConfigDict` or dict): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only support ' \ 'naive nms.' mlvl_proposals = [] for idx in range(len(cls_scores)): rpn_cls_score = cls_scores[idx] rpn_bbox_pred = bbox_preds[idx] anchors = mlvl_anchors[idx] mask = mlvl_masks[idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] # if no location is kept, end. if mask.sum() == 0: continue rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class scores = rpn_cls_score.softmax(dim=1)[:, :-1] # filter scores, bbox_pred w.r.t. mask. # anchors are filtered in get_anchors() beforehand. scores = scores[mask] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)[mask, :] if scores.dim() == 0: rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0) anchors = anchors.unsqueeze(0) scores = scores.unsqueeze(0) # filter anchors, bbox_pred, scores w.r.t. scores if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: _, topk_inds = scores.topk(cfg.nms_pre) rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] scores = scores[topk_inds] # get proposals w.r.t. anchors and rpn_bbox_pred proposals = self.bbox_coder.decode( anchors, rpn_bbox_pred, max_shape=img_meta['img_shape']) # filter out too small bboxes if cfg.min_bbox_size >= 0: w = proposals[:, 2] - proposals[:, 0] h = proposals[:, 3] - proposals[:, 1] valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) if not valid_mask.all(): proposals = proposals[valid_mask] scores = scores[valid_mask] # NMS in current level proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold) proposals = proposals[:cfg.nms_post, :] mlvl_proposals.append(proposals) proposals = torch.cat(mlvl_proposals, 0) if cfg.get('nms_across_levels', False): # NMS across multi levels proposals, _ = nms(proposals[:, :4], proposals[:, -1], cfg.nms.iou_threshold) proposals = proposals[:cfg.max_per_img, :] else: scores = proposals[:, 4] num = min(cfg.max_per_img, proposals.shape[0]) _, topk_inds = scores.topk(num) proposals = proposals[topk_inds, :] bboxes = proposals[:, :-1] scores = proposals[:, -1] if rescale: assert img_meta.get('scale_factor') is not None bboxes /= bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) results = InstanceData() results.bboxes = bboxes results.scores = scores results.labels = scores.new_zeros(scores.size(0), dtype=torch.long) return results
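# --- Editor's note: illustrative sketch, not part of the original file. ---
# A minimal, self-contained version of the per-level post-processing done in
# `_predict_by_feat_single`: drop tiny boxes, run NMS inside each level, then
# keep the top-scoring proposals across levels.  torchvision's NMS stands in
# for the `mmcv.ops.nms` call used above; the threshold values are made-up
# examples, not the config defaults.
import torch
from torchvision.ops import nms


def simple_rpn_postprocess(level_boxes, level_scores, min_size=0.0,
                           iou_thr=0.7, nms_post=1000, max_per_img=300):
    kept = []
    for boxes, scores in zip(level_boxes, level_scores):
        w = boxes[:, 2] - boxes[:, 0]
        h = boxes[:, 3] - boxes[:, 1]
        valid = (w > min_size) & (h > min_size)
        boxes, scores = boxes[valid], scores[valid]
        keep = nms(boxes, scores, iou_thr)[:nms_post]   # per-level NMS
        kept.append(torch.cat([boxes[keep], scores[keep, None]], dim=1))
    proposals = torch.cat(kept, dim=0)
    # keep the highest-scoring proposals across all levels
    topk = proposals[:, 4].topk(min(max_per_img, proposals.size(0))).indices
    return proposals[topk]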
9,455
41.403587
79
py
ERD
ERD-main/mmdet/models/dense_heads/tood_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale from mmcv.ops import deform_conv2d from mmengine import MessageHub from mmengine.config import ConfigDict from mmengine.model import bias_init_with_prob, normal_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures.bbox import distance2bbox from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, reduce_mean) from ..task_modules.prior_generators import anchor_inside_flags from ..utils import (filter_scores_and_topk, images_to_levels, multi_apply, sigmoid_geometric_mean, unmap) from .atss_head import ATSSHead class TaskDecomposition(nn.Module): """Task decomposition module in task-aligned predictor of TOOD. Args: feat_channels (int): Number of feature channels in TOOD head. stacked_convs (int): Number of conv layers in TOOD head. la_down_rate (int): Downsample rate of layer attention. Defaults to 8. conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for convolution layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for normalization layer. Defaults to None. """ def __init__(self, feat_channels: int, stacked_convs: int, la_down_rate: int = 8, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None) -> None: super().__init__() self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.in_channels = self.feat_channels * self.stacked_convs self.norm_cfg = norm_cfg self.layer_attention = nn.Sequential( nn.Conv2d(self.in_channels, self.in_channels // la_down_rate, 1), nn.ReLU(inplace=True), nn.Conv2d( self.in_channels // la_down_rate, self.stacked_convs, 1, padding=0), nn.Sigmoid()) self.reduction_conv = ConvModule( self.in_channels, self.feat_channels, 1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, bias=norm_cfg is None) def init_weights(self) -> None: """Initialize the parameters.""" for m in self.layer_attention.modules(): if isinstance(m, nn.Conv2d): normal_init(m, std=0.001) normal_init(self.reduction_conv.conv, std=0.01) def forward(self, feat: Tensor, avg_feat: Optional[Tensor] = None) -> Tensor: """Forward function of task decomposition module.""" b, c, h, w = feat.shape if avg_feat is None: avg_feat = F.adaptive_avg_pool2d(feat, (1, 1)) weight = self.layer_attention(avg_feat) # here we first compute the product between layer attention weight and # conv weight, and then compute the convolution between new conv weight # and feature map, in order to save memory and FLOPs. conv_weight = weight.reshape( b, 1, self.stacked_convs, 1) * self.reduction_conv.conv.weight.reshape( 1, self.feat_channels, self.stacked_convs, self.feat_channels) conv_weight = conv_weight.reshape(b, self.feat_channels, self.in_channels) feat = feat.reshape(b, self.in_channels, h * w) feat = torch.bmm(conv_weight, feat).reshape(b, self.feat_channels, h, w) if self.norm_cfg is not None: feat = self.reduction_conv.norm(feat) feat = self.reduction_conv.activate(feat) return feat @MODELS.register_module() class TOODHead(ATSSHead): """TOODHead used in `TOOD: Task-aligned One-stage Object Detection. <https://arxiv.org/abs/2108.07755>`_. TOOD uses Task-aligned head (T-head) and is optimized by Task Alignment Learning (TAL). Args: num_classes (int): Number of categories excluding the background category. 
in_channels (int): Number of channels in the input feature map. num_dcn (int): Number of deformable convolution in the head. Defaults to 0. anchor_type (str): If set to ``anchor_free``, the head will use centers to regress bboxes. If set to ``anchor_based``, the head will regress bboxes based on anchors. Defaults to ``anchor_free``. initial_loss_cls (:obj:`ConfigDict` or dict): Config of initial loss. Example: >>> self = TOODHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred = self.forward(feats) >>> assert len(cls_score) == len(self.scales) """ def __init__(self, num_classes: int, in_channels: int, num_dcn: int = 0, anchor_type: str = 'anchor_free', initial_loss_cls: ConfigType = dict( type='FocalLoss', use_sigmoid=True, activated=True, gamma=2.0, alpha=0.25, loss_weight=1.0), **kwargs) -> None: assert anchor_type in ['anchor_free', 'anchor_based'] self.num_dcn = num_dcn self.anchor_type = anchor_type super().__init__( num_classes=num_classes, in_channels=in_channels, **kwargs) if self.train_cfg: self.initial_epoch = self.train_cfg['initial_epoch'] self.initial_assigner = TASK_UTILS.build( self.train_cfg['initial_assigner']) self.initial_loss_cls = MODELS.build(initial_loss_cls) self.assigner = self.initial_assigner self.alignment_assigner = TASK_UTILS.build( self.train_cfg['assigner']) self.alpha = self.train_cfg['alpha'] self.beta = self.train_cfg['beta'] def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.inter_convs = nn.ModuleList() for i in range(self.stacked_convs): if i < self.num_dcn: conv_cfg = dict(type='DCNv2', deform_groups=4) else: conv_cfg = self.conv_cfg chn = self.in_channels if i == 0 else self.feat_channels self.inter_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) self.cls_decomp = TaskDecomposition(self.feat_channels, self.stacked_convs, self.stacked_convs * 8, self.conv_cfg, self.norm_cfg) self.reg_decomp = TaskDecomposition(self.feat_channels, self.stacked_convs, self.stacked_convs * 8, self.conv_cfg, self.norm_cfg) self.tood_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.tood_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.cls_prob_module = nn.Sequential( nn.Conv2d(self.feat_channels * self.stacked_convs, self.feat_channels // 4, 1), nn.ReLU(inplace=True), nn.Conv2d(self.feat_channels // 4, 1, 3, padding=1)) self.reg_offset_module = nn.Sequential( nn.Conv2d(self.feat_channels * self.stacked_convs, self.feat_channels // 4, 1), nn.ReLU(inplace=True), nn.Conv2d(self.feat_channels // 4, 4 * 2, 3, padding=1)) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def init_weights(self) -> None: """Initialize weights of the head.""" bias_cls = bias_init_with_prob(0.01) for m in self.inter_convs: normal_init(m.conv, std=0.01) for m in self.cls_prob_module: if isinstance(m, nn.Conv2d): normal_init(m, std=0.01) for m in self.reg_offset_module: if isinstance(m, nn.Conv2d): normal_init(m, std=0.001) normal_init(self.cls_prob_module[-1], std=0.01, bias=bias_cls) self.cls_decomp.init_weights() self.reg_decomp.init_weights() normal_init(self.tood_cls, std=0.01, bias=bias_cls) normal_init(self.tood_reg, std=0.01) def forward(self, feats: Tuple[Tensor]) -> Tuple[List[Tensor]]: """Forward features from the upstream network. 
Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Decoded box for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. In [tl_x, tl_y, br_x, br_y] format. """ cls_scores = [] bbox_preds = [] for idx, (x, scale, stride) in enumerate( zip(feats, self.scales, self.prior_generator.strides)): b, c, h, w = x.shape anchor = self.prior_generator.single_level_grid_priors( (h, w), idx, device=x.device) anchor = torch.cat([anchor for _ in range(b)]) # extract task interactive features inter_feats = [] for inter_conv in self.inter_convs: x = inter_conv(x) inter_feats.append(x) feat = torch.cat(inter_feats, 1) # task decomposition avg_feat = F.adaptive_avg_pool2d(feat, (1, 1)) cls_feat = self.cls_decomp(feat, avg_feat) reg_feat = self.reg_decomp(feat, avg_feat) # cls prediction and alignment cls_logits = self.tood_cls(cls_feat) cls_prob = self.cls_prob_module(feat) cls_score = sigmoid_geometric_mean(cls_logits, cls_prob) # reg prediction and alignment if self.anchor_type == 'anchor_free': reg_dist = scale(self.tood_reg(reg_feat).exp()).float() reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4) reg_bbox = distance2bbox( self.anchor_center(anchor) / stride[0], reg_dist).reshape(b, h, w, 4).permute(0, 3, 1, 2) # (b, c, h, w) elif self.anchor_type == 'anchor_based': reg_dist = scale(self.tood_reg(reg_feat)).float() reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4) reg_bbox = self.bbox_coder.decode(anchor, reg_dist).reshape( b, h, w, 4).permute(0, 3, 1, 2) / stride[0] else: raise NotImplementedError( f'Unknown anchor type: {self.anchor_type}.' f'Please use `anchor_free` or `anchor_based`.') reg_offset = self.reg_offset_module(feat) bbox_pred = self.deform_sampling(reg_bbox.contiguous(), reg_offset.contiguous()) # After deform_sampling, some boxes will become invalid (The # left-top point is at the right or bottom of the right-bottom # point), which will make the GIoULoss negative. invalid_bbox_idx = (bbox_pred[:, [0]] > bbox_pred[:, [2]]) | \ (bbox_pred[:, [1]] > bbox_pred[:, [3]]) invalid_bbox_idx = invalid_bbox_idx.expand_as(bbox_pred) bbox_pred = torch.where(invalid_bbox_idx, reg_bbox, bbox_pred) cls_scores.append(cls_score) bbox_preds.append(bbox_pred) return tuple(cls_scores), tuple(bbox_preds) def deform_sampling(self, feat: Tensor, offset: Tensor) -> Tensor: """Sampling the feature x according to offset. Args: feat (Tensor): Feature offset (Tensor): Spatial offset for feature sampling """ # it is an equivalent implementation of bilinear interpolation b, c, h, w = feat.shape weight = feat.new_ones(c, 1, 1, 1) y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) return y def anchor_center(self, anchors: Tensor) -> Tensor: """Get anchor centers from anchors. Args: anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. Returns: Tensor: Anchor centers with shape (N, 2), "xy" format. 
""" anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 return torch.stack([anchors_cx, anchors_cy], dim=-1) def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor, bbox_pred: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, alignment_metrics: Tensor, stride: Tuple[int, int]) -> dict: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Decoded bboxes for each scale level with shape (N, num_anchors * 4, H, W). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors). bbox_targets (Tensor): BBox regression targets of each anchor with shape (N, num_total_anchors, 4). alignment_metrics (Tensor): Alignment metrics with shape (N, num_total_anchors). stride (Tuple[int, int]): Downsample stride of the feature map. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape( -1, self.cls_out_channels).contiguous() bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) alignment_metrics = alignment_metrics.reshape(-1) label_weights = label_weights.reshape(-1) targets = labels if self.epoch < self.initial_epoch else ( labels, alignment_metrics) cls_loss_func = self.initial_loss_cls \ if self.epoch < self.initial_epoch else self.loss_cls loss_cls = cls_loss_func( cls_score, targets, label_weights, avg_factor=1.0) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_decode_bbox_pred = pos_bbox_pred pos_decode_bbox_targets = pos_bbox_targets / stride[0] # regression loss pos_bbox_weight = self.centerness_target( pos_anchors, pos_bbox_targets ) if self.epoch < self.initial_epoch else alignment_metrics[ pos_inds] loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=pos_bbox_weight, avg_factor=1.0) else: loss_bbox = bbox_pred.sum() * 0 pos_bbox_weight = bbox_targets.new_tensor(0.) return loss_cls, loss_bbox, alignment_metrics.sum( ), pos_bbox_weight.sum() def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Decoded box for each scale level with shape (N, num_anchors * 4, H, W) in [tl_x, tl_y, br_x, br_y] format. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. 
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = len(batch_img_metas) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) flatten_cls_scores = torch.cat([ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels) for cls_score in cls_scores ], 1) flatten_bbox_preds = torch.cat([ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) * stride[0] for bbox_pred, stride in zip(bbox_preds, self.prior_generator.strides) ], 1) cls_reg_targets = self.get_targets( flatten_cls_scores, flatten_bbox_preds, anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (anchor_list, labels_list, label_weights_list, bbox_targets_list, alignment_metrics_list) = cls_reg_targets losses_cls, losses_bbox, \ cls_avg_factors, bbox_avg_factors = multi_apply( self.loss_by_feat_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, alignment_metrics_list, self.prior_generator.strides) cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item() losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls)) bbox_avg_factor = reduce_mean( sum(bbox_avg_factors)).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (:obj:`ConfigDict`, optional): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. 
If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape \ [num_bboxes, 5], where the first 4 columns are bounding \ box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding \ box with shape [num_bboxes]. """ cfg = self.test_cfg if cfg is None else cfg nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for cls_score, bbox_pred, priors, stride in zip( cls_score_list, bbox_pred_list, mlvl_priors, self.prior_generator.strides): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) * stride[0] scores = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, keep_idxs, filtered_results = results bboxes = filtered_results['bbox_pred'] mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) results = InstanceData() results.bboxes = torch.cat(mlvl_bboxes) results.scores = torch.cat(mlvl_scores) results.labels = torch.cat(mlvl_labels) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) def get_targets(self, cls_scores: List[List[Tensor]], bbox_preds: List[List[Tensor]], anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Compute regression and classification targets for anchors in multiple images. Args: cls_scores (list[list[Tensor]]): Classification predictions of images, a 3D-Tensor with shape [num_imgs, num_priors, num_classes]. bbox_preds (list[list[Tensor]]): Decoded bboxes predictions of one image, a 3D-Tensor with shape [num_imgs, num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: a tuple containing learning targets. - anchors_list (list[list[Tensor]]): Anchors of each level. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. 
- bbox_targets_list (list[Tensor]): BBox targets of each level. - norm_alignment_metrics_list (list[Tensor]): Normalized alignment metrics of each level. """ num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs # anchor_list: list(b * [-1, 4]) # get epoch information from message hub message_hub = MessageHub.get_current_instance() self.epoch = message_hub.get_info('epoch') if self.epoch < self.initial_epoch: (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_result) = multi_apply( super()._get_targets_single, anchor_list, valid_flag_list, num_level_anchors_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) all_assign_metrics = [ weight[..., 0] for weight in all_bbox_weights ] else: (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_assign_metrics) = multi_apply( self._get_targets_single, cls_scores, bbox_preds, anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) norm_alignment_metrics_list = images_to_levels(all_assign_metrics, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, norm_alignment_metrics_list) def _get_targets_single(self, cls_scores: Tensor, bbox_preds: Tensor, flat_anchors: Tensor, valid_flags: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression, classification targets for anchors in a single image. Args: cls_scores (Tensor): Box scores for each image. bbox_preds (Tensor): Box energies / deltas for each image. flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. anchors (Tensor): All anchors in the image with shape (N, 4). labels (Tensor): Labels of all anchors in the image with shape (N,). 
label_weights (Tensor): Label weights of all anchor in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). norm_alignment_metrics (Tensor): Normalized alignment metrics of all priors in the image with shape (N,). """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] pred_instances = InstanceData( priors=anchors, scores=cls_scores[inside_flags, :], bboxes=bbox_preds[inside_flags, :]) assign_result = self.alignment_assigner.assign(pred_instances, gt_instances, gt_instances_ignore, self.alpha, self.beta) assign_ious = assign_result.max_overlaps assign_metrics = assign_result.assign_metrics sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) norm_alignment_metrics = anchors.new_zeros( num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: # point-based pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 class_assigned_gt_inds = torch.unique( sampling_result.pos_assigned_gt_inds) for gt_inds in class_assigned_gt_inds: gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds == gt_inds] pos_alignment_metrics = assign_metrics[gt_class_inds] pos_ious = assign_ious[gt_class_inds] pos_norm_alignment_metrics = pos_alignment_metrics / ( pos_alignment_metrics.max() + 10e-8) * pos_ious.max() norm_alignment_metrics[gt_class_inds] = pos_norm_alignment_metrics # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) norm_alignment_metrics = unmap(norm_alignment_metrics, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, norm_alignment_metrics)
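# --- Editor's note: illustrative sketch, not part of the original file. ---
# Per-GT normalisation of the alignment metric as done at the end of
# `_get_targets_single`: each positive's metric is rescaled so that the best
# candidate of a ground-truth box receives that box's best IoU as its target.
import torch


def normalize_alignment_metrics(metrics: torch.Tensor,
                                ious: torch.Tensor) -> torch.Tensor:
    """`metrics`/`ious`: alignment metrics and IoUs of the positives of ONE gt."""
    return metrics / (metrics.max() + 1e-7) * ious.max()


# toy example: the strongest candidate ends up with the max IoU (0.9)
m = torch.tensor([0.2, 0.5, 1.0])
iou = torch.tensor([0.3, 0.6, 0.9])
print(normalize_alignment_metrics(m, iou))  # ~[0.18, 0.45, 0.90]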
36,487
44.270471
79
py
ERD
ERD-main/mmdet/models/dense_heads/deformable_detr_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from typing import Dict, List, Tuple import torch import torch.nn as nn from mmcv.cnn import Linear from mmengine.model import bias_init_with_prob, constant_init from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.utils import InstanceList, OptInstanceList from ..layers import inverse_sigmoid from .detr_head import DETRHead @MODELS.register_module() class DeformableDETRHead(DETRHead): r"""Head of DeformDETR: Deformable DETR: Deformable Transformers for End-to-End Object Detection. Code is modified from the `official github repo <https://github.com/fundamentalvision/Deformable-DETR>`_. More details can be found in the `paper <https://arxiv.org/abs/2010.04159>`_ . Args: share_pred_layer (bool): Whether to share parameters for all the prediction layers. Defaults to `False`. num_pred_layer (int): The number of the prediction layers. Defaults to 6. as_two_stage (bool, optional): Whether to generate the proposal from the outputs of encoder. Defaults to `False`. """ def __init__(self, *args, share_pred_layer: bool = False, num_pred_layer: int = 6, as_two_stage: bool = False, **kwargs) -> None: self.share_pred_layer = share_pred_layer self.num_pred_layer = num_pred_layer self.as_two_stage = as_two_stage super().__init__(*args, **kwargs) def _init_layers(self) -> None: """Initialize classification branch and regression branch of head.""" fc_cls = Linear(self.embed_dims, self.cls_out_channels) reg_branch = [] for _ in range(self.num_reg_fcs): reg_branch.append(Linear(self.embed_dims, self.embed_dims)) reg_branch.append(nn.ReLU()) reg_branch.append(Linear(self.embed_dims, 4)) reg_branch = nn.Sequential(*reg_branch) if self.share_pred_layer: self.cls_branches = nn.ModuleList( [fc_cls for _ in range(self.num_pred_layer)]) self.reg_branches = nn.ModuleList( [reg_branch for _ in range(self.num_pred_layer)]) else: self.cls_branches = nn.ModuleList( [copy.deepcopy(fc_cls) for _ in range(self.num_pred_layer)]) self.reg_branches = nn.ModuleList([ copy.deepcopy(reg_branch) for _ in range(self.num_pred_layer) ]) def init_weights(self) -> None: """Initialize weights of the Deformable DETR head.""" if self.loss_cls.use_sigmoid: bias_init = bias_init_with_prob(0.01) for m in self.cls_branches: nn.init.constant_(m.bias, bias_init) for m in self.reg_branches: constant_init(m[-1], 0, bias=0) nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0) if self.as_two_stage: for m in self.reg_branches: nn.init.constant_(m[-1].bias.data[2:], 0.0) def forward(self, hidden_states: Tensor, references: List[Tensor]) -> Tuple[Tensor]: """Forward function. Args: hidden_states (Tensor): Hidden states output from each decoder layer, has shape (num_decoder_layers, bs, num_queries, dim). references (list[Tensor]): List of the reference from the decoder. The first reference is the `init_reference` (initial) and the other num_decoder_layers(6) references are `inter_references` (intermediate). The `init_reference` has shape (bs, num_queries, 4) when `as_two_stage` of the detector is `True`, otherwise (bs, num_queries, 2). Each `inter_reference` has shape (bs, num_queries, 4) when `with_box_refine` of the detector is `True`, otherwise (bs, num_queries, 2). The coordinates are arranged as (cx, cy) when the last dimension is 2, and (cx, cy, w, h) when it is 4. Returns: tuple[Tensor]: results of head containing the following tensor. 
- all_layers_outputs_classes (Tensor): Outputs from the classification head, has shape (num_decoder_layers, bs, num_queries, cls_out_channels). - all_layers_outputs_coords (Tensor): Sigmoid outputs from the regression head with normalized coordinate format (cx, cy, w, h), has shape (num_decoder_layers, bs, num_queries, 4) with the last dimension arranged as (cx, cy, w, h). """ all_layers_outputs_classes = [] all_layers_outputs_coords = [] for layer_id in range(hidden_states.shape[0]): reference = inverse_sigmoid(references[layer_id]) # NOTE The last reference will not be used. hidden_state = hidden_states[layer_id] outputs_class = self.cls_branches[layer_id](hidden_state) tmp_reg_preds = self.reg_branches[layer_id](hidden_state) if reference.shape[-1] == 4: # When `layer` is 0 and `as_two_stage` of the detector # is `True`, or when `layer` is greater than 0 and # `with_box_refine` of the detector is `True`. tmp_reg_preds += reference else: # When `layer` is 0 and `as_two_stage` of the detector # is `False`, or when `layer` is greater than 0 and # `with_box_refine` of the detector is `False`. assert reference.shape[-1] == 2 tmp_reg_preds[..., :2] += reference outputs_coord = tmp_reg_preds.sigmoid() all_layers_outputs_classes.append(outputs_class) all_layers_outputs_coords.append(outputs_coord) all_layers_outputs_classes = torch.stack(all_layers_outputs_classes) all_layers_outputs_coords = torch.stack(all_layers_outputs_coords) return all_layers_outputs_classes, all_layers_outputs_coords def loss(self, hidden_states: Tensor, references: List[Tensor], enc_outputs_class: Tensor, enc_outputs_coord: Tensor, batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection head on the queries of the upstream network. Args: hidden_states (Tensor): Hidden states output from each decoder layer, has shape (num_decoder_layers, num_queries, bs, dim). references (list[Tensor]): List of the reference from the decoder. The first reference is the `init_reference` (initial) and the other num_decoder_layers(6) references are `inter_references` (intermediate). The `init_reference` has shape (bs, num_queries, 4) when `as_two_stage` of the detector is `True`, otherwise (bs, num_queries, 2). Each `inter_reference` has shape (bs, num_queries, 4) when `with_box_refine` of the detector is `True`, otherwise (bs, num_queries, 2). The coordinates are arranged as (cx, cy) when the last dimension is 2, and (cx, cy, w, h) when it is 4. enc_outputs_class (Tensor): The score of each point on encode feature map, has shape (bs, num_feat_points, cls_out_channels). Only when `as_two_stage` is `True` it would be passed in, otherwise it would be `None`. enc_outputs_coord (Tensor): The proposal generate from the encode feature map, has shape (bs, num_feat_points, 4) with the last dimension arranged as (cx, cy, w, h). Only when `as_two_stage` is `True` it would be passed in, otherwise it would be `None`. batch_data_samples (list[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: dict: A dictionary of loss components. 
""" batch_gt_instances = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) outs = self(hidden_states, references) loss_inputs = outs + (enc_outputs_class, enc_outputs_coord, batch_gt_instances, batch_img_metas) losses = self.loss_by_feat(*loss_inputs) return losses def loss_by_feat( self, all_layers_cls_scores: Tensor, all_layers_bbox_preds: Tensor, enc_cls_scores: Tensor, enc_bbox_preds: Tensor, batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Loss function. Args: all_layers_cls_scores (Tensor): Classification scores of all decoder layers, has shape (num_decoder_layers, bs, num_queries, cls_out_channels). all_layers_bbox_preds (Tensor): Regression outputs of all decoder layers. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (num_decoder_layers, bs, num_queries, 4) with the last dimension arranged as (cx, cy, w, h). enc_cls_scores (Tensor): The score of each point on encode feature map, has shape (bs, num_feat_points, cls_out_channels). Only when `as_two_stage` is `True` it would be passes in, otherwise, it would be `None`. enc_bbox_preds (Tensor): The proposal generate from the encode feature map, has shape (bs, num_feat_points, 4) with the last dimension arranged as (cx, cy, w, h). Only when `as_two_stage` is `True` it would be passed in, otherwise it would be `None`. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ loss_dict = super().loss_by_feat(all_layers_cls_scores, all_layers_bbox_preds, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) # loss of proposal generated from encode feature map. if enc_cls_scores is not None: proposal_gt_instances = copy.deepcopy(batch_gt_instances) for i in range(len(proposal_gt_instances)): proposal_gt_instances[i].labels = torch.zeros_like( proposal_gt_instances[i].labels) enc_loss_cls, enc_losses_bbox, enc_losses_iou = \ self.loss_by_feat_single( enc_cls_scores, enc_bbox_preds, batch_gt_instances=proposal_gt_instances, batch_img_metas=batch_img_metas) loss_dict['enc_loss_cls'] = enc_loss_cls loss_dict['enc_loss_bbox'] = enc_losses_bbox loss_dict['enc_loss_iou'] = enc_losses_iou return loss_dict def predict(self, hidden_states: Tensor, references: List[Tensor], batch_data_samples: SampleList, rescale: bool = True) -> InstanceList: """Perform forward propagation and loss calculation of the detection head on the queries of the upstream network. Args: hidden_states (Tensor): Hidden states output from each decoder layer, has shape (num_decoder_layers, num_queries, bs, dim). references (list[Tensor]): List of the reference from the decoder. The first reference is the `init_reference` (initial) and the other num_decoder_layers(6) references are `inter_references` (intermediate). The `init_reference` has shape (bs, num_queries, 4) when `as_two_stage` of the detector is `True`, otherwise (bs, num_queries, 2). 
Each `inter_reference` has shape (bs, num_queries, 4) when `with_box_refine` of the detector is `True`, otherwise (bs, num_queries, 2). The coordinates are arranged as (cx, cy) when the last dimension is 2, and (cx, cy, w, h) when it is 4. batch_data_samples (list[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): If `True`, return boxes in original image space. Defaults to `True`. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. """ batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] outs = self(hidden_states, references) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return predictions def predict_by_feat(self, all_layers_cls_scores: Tensor, all_layers_bbox_preds: Tensor, batch_img_metas: List[Dict], rescale: bool = False) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Args: all_layers_cls_scores (Tensor): Classification scores of all decoder layers, has shape (num_decoder_layers, bs, num_queries, cls_out_channels). all_layers_bbox_preds (Tensor): Regression outputs of all decoder layers. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and shape (num_decoder_layers, bs, num_queries, 4) with the last dimension arranged as (cx, cy, w, h). batch_img_metas (list[dict]): Meta information of each image. rescale (bool, optional): If `True`, return boxes in original image space. Default `False`. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. """ cls_scores = all_layers_cls_scores[-1] bbox_preds = all_layers_bbox_preds[-1] result_list = [] for img_id in range(len(batch_img_metas)): cls_score = cls_scores[img_id] bbox_pred = bbox_preds[img_id] img_meta = batch_img_metas[img_id] results = self._predict_by_feat_single(cls_score, bbox_pred, img_meta, rescale) result_list.append(results) return result_list
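# --- Editor's note: illustrative sketch, not part of the original file. ---
# The core of the per-layer refinement in `forward`: the regression branch
# predicts an offset in logit space, which is added to the inverse-sigmoid of
# the reference box and squashed back into (0, 1).  `inverse_sigmoid` is
# re-implemented locally (simplified) so the snippet runs without mmdet.
import torch


def inverse_sigmoid(x: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))


def refine_boxes(reference: torch.Tensor, delta: torch.Tensor) -> torch.Tensor:
    """reference: normalized (cx, cy, w, h) in (0, 1); delta: raw head output."""
    return (delta + inverse_sigmoid(reference)).sigmoid()


ref = torch.tensor([[0.5, 0.5, 0.2, 0.2]])
print(refine_boxes(ref, torch.zeros(1, 4)))  # zero delta keeps the reference box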
15,707
46.744681
79
py
ERD
ERD-main/mmdet/models/dense_heads/dab_detr_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple import torch.nn as nn from mmcv.cnn import Linear from mmengine.model import bias_init_with_prob, constant_init from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.utils import InstanceList from ..layers import MLP, inverse_sigmoid from .conditional_detr_head import ConditionalDETRHead @MODELS.register_module() class DABDETRHead(ConditionalDETRHead): """Head of DAB-DETR. DAB-DETR: Dynamic Anchor Boxes are Better Queries for DETR. More details can be found in the `paper <https://arxiv.org/abs/2201.12329>`_ . """ def _init_layers(self) -> None: """Initialize layers of the transformer head.""" # cls branch self.fc_cls = Linear(self.embed_dims, self.cls_out_channels) # reg branch self.fc_reg = MLP(self.embed_dims, self.embed_dims, 4, 3) def init_weights(self) -> None: """initialize weights.""" if self.loss_cls.use_sigmoid: bias_init = bias_init_with_prob(0.01) nn.init.constant_(self.fc_cls.bias, bias_init) constant_init(self.fc_reg.layers[-1], 0., bias=0.) def forward(self, hidden_states: Tensor, references: Tensor) -> Tuple[Tensor, Tensor]: """"Forward function. Args: hidden_states (Tensor): Features from transformer decoder. If `return_intermediate_dec` is True output has shape (num_decoder_layers, bs, num_queries, dim), else has shape (1, bs, num_queries, dim) which only contains the last layer outputs. references (Tensor): References from transformer decoder. If `return_intermediate_dec` is True output has shape (num_decoder_layers, bs, num_queries, 2/4), else has shape (1, bs, num_queries, 2/4) which only contains the last layer reference. Returns: tuple[Tensor]: results of head containing the following tensor. - layers_cls_scores (Tensor): Outputs from the classification head, shape (num_decoder_layers, bs, num_queries, cls_out_channels). Note cls_out_channels should include background. - layers_bbox_preds (Tensor): Sigmoid outputs from the regression head with normalized coordinate format (cx, cy, w, h), has shape (num_decoder_layers, bs, num_queries, 4). """ layers_cls_scores = self.fc_cls(hidden_states) references_before_sigmoid = inverse_sigmoid(references, eps=1e-3) tmp_reg_preds = self.fc_reg(hidden_states) tmp_reg_preds[..., :references_before_sigmoid. size(-1)] += references_before_sigmoid layers_bbox_preds = tmp_reg_preds.sigmoid() return layers_cls_scores, layers_bbox_preds def predict(self, hidden_states: Tensor, references: Tensor, batch_data_samples: SampleList, rescale: bool = True) -> InstanceList: """Perform forward propagation of the detection head and predict detection results on the features of the upstream network. Over-write because img_metas are needed as inputs for bbox_head. Args: hidden_states (Tensor): Feature from the transformer decoder, has shape (num_decoder_layers, bs, num_queries, dim). references (Tensor): references from the transformer decoder, has shape (num_decoder_layers, bs, num_queries, 2/4). batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to True. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. 
""" batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] last_layer_hidden_state = hidden_states[-1].unsqueeze(0) last_layer_reference = references[-1].unsqueeze(0) outs = self(last_layer_hidden_state, last_layer_reference) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return predictions
4,553
41.560748
79
py
ERD
ERD-main/mmdet/models/dense_heads/ga_retina_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import MaskedConv2d from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import OptConfigType, OptMultiConfig from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead @MODELS.register_module() class GARetinaHead(GuidedAnchorHead): """Guided-Anchor-based RetinaNet head.""" def __init__(self, num_classes: int, in_channels: int, stacked_convs: int = 4, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, init_cfg: OptMultiConfig = None, **kwargs) -> None: if init_cfg is None: init_cfg = dict( type='Normal', layer='Conv2d', std=0.01, override=[ dict( type='Normal', name='conv_loc', std=0.01, bias_prob=0.01), dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01) ]) self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super().__init__( num_classes=num_classes, in_channels=in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1) num_anchors = self.square_anchor_generator.num_base_priors[0] self.conv_shape = nn.Conv2d(self.feat_channels, num_anchors * 2, 1) self.feature_adaption_cls = FeatureAdaption( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.feature_adaption_reg = FeatureAdaption( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.retina_cls = MaskedConv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.retina_reg = MaskedConv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) def forward_single(self, x: Tensor) -> Tuple[Tensor]: """Forward feature map of a single scale level.""" cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) loc_pred = self.conv_loc(cls_feat) shape_pred = self.conv_shape(reg_feat) cls_feat = self.feature_adaption_cls(cls_feat, shape_pred) reg_feat = self.feature_adaption_reg(reg_feat, shape_pred) if not self.training: mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr else: mask = None cls_score = self.retina_cls(cls_feat, mask) bbox_pred = self.retina_reg(reg_feat, mask) return cls_score, bbox_pred, shape_pred, loc_pred
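# --- Editor's note: illustrative sketch, not part of the original file. ---
# At test time `forward_single` thresholds the location prediction to obtain
# the mask passed to `MaskedConv2d`, so classification/regression are only
# evaluated at likely object locations.  Pure-torch sketch of that step;
# `loc_filter_thr=0.01` is an assumed example value, not read from a config.
import torch

loc_pred = torch.rand(1, 1, 32, 32)        # (N, 1, H, W) location logits
mask = loc_pred.sigmoid()[0] >= 0.01       # boolean (1, H, W) keep-mask
print(mask.float().mean())                 # fraction of positions kept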
4,224
33.917355
75
py
ERD
ERD-main/mmdet/models/dense_heads/ld_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import torch from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean from ..utils import multi_apply, unpack_gt_instances from .gfl_head import GFLHead @MODELS.register_module() class LDHead(GFLHead): """Localization distillation Head. (Short description) It utilizes the learned bbox distributions to transfer the localization dark knowledge from teacher to student. Original paper: `Localization Distillation for Object Detection. <https://arxiv.org/abs/2102.12252>`_ Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. loss_ld (:obj:`ConfigDict` or dict): Config of Localization Distillation Loss (LD), T is the temperature for distillation. """ def __init__(self, num_classes: int, in_channels: int, loss_ld: ConfigType = dict( type='LocalizationDistillationLoss', loss_weight=0.25, T=10), **kwargs) -> dict: super().__init__( num_classes=num_classes, in_channels=in_channels, **kwargs) self.loss_ld = MODELS.build(loss_ld) def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor, bbox_pred: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, stride: Tuple[int], soft_targets: Tensor, avg_factor: int): """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). stride (tuple): Stride in this scale level. soft_targets (Tensor): Soft BBox regression targets. avg_factor (int): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: dict[tuple, Tensor]: Loss components and weight targets. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) soft_targets = soft_targets.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) score = label_weights.new_zeros(labels.shape) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] weight_targets = cls_score.detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][pos_inds] pos_bbox_pred_corners = self.integral(pos_bbox_pred) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchor_centers, pos_bbox_pred_corners) pos_decode_bbox_targets = pos_bbox_targets / stride[0] score[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) pos_soft_targets = soft_targets[pos_inds] soft_corners = pos_soft_targets.reshape(-1, self.reg_max + 1) target_corners = self.bbox_coder.encode(pos_anchor_centers, pos_decode_bbox_targets, self.reg_max).reshape(-1) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=weight_targets, avg_factor=1.0) # dfl loss loss_dfl = self.loss_dfl( pred_corners, target_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) # ld loss loss_ld = self.loss_ld( pred_corners, soft_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) else: loss_ld = bbox_pred.sum() * 0 loss_bbox = bbox_pred.sum() * 0 loss_dfl = bbox_pred.sum() * 0 weight_targets = bbox_pred.new_tensor(0) # cls (qfl) loss loss_cls = self.loss_cls( cls_score, (labels, score), weight=label_weights, avg_factor=avg_factor) return loss_cls, loss_bbox, loss_dfl, loss_ld, weight_targets.sum() def loss(self, x: List[Tensor], out_teacher: Tuple[Tensor], batch_data_samples: SampleList) -> dict: """ Args: x (list[Tensor]): Features from FPN. out_teacher (tuple[Tensor]): The output of teacher. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. Returns: tuple[dict, list]: The loss components and proposals of each image. - losses (dict[str, Tensor]): A dictionary of loss components. - proposal_list (list[Tensor]): Proposals of each image. """ outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \ = outputs outs = self(x) soft_targets = out_teacher[1] loss_inputs = outs + (batch_gt_instances, batch_img_metas, soft_targets) losses = self.loss_by_feat( *loss_inputs, batch_gt_instances_ignore=batch_gt_instances_ignore) return losses def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], soft_targets: List[Tensor], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Compute losses of the head. Args: cls_scores (list[Tensor]): Cls and quality scores for each scale level has shape (N, num_classes, H, W). 
            bbox_preds (list[Tensor]): Box distribution logits for each scale
                level with shape (N, 4*(n+1), H, W), n is max value of
                integral set.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            soft_targets (list[Tensor]): Soft BBox regression targets.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels

        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, batch_img_metas, device=device)

        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            batch_gt_instances,
            batch_img_metas,
            batch_gt_instances_ignore=batch_gt_instances_ignore)

        (anchor_list, labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, avg_factor) = cls_reg_targets

        avg_factor = reduce_mean(
            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()

        losses_cls, losses_bbox, losses_dfl, losses_ld, \
            avg_factor = multi_apply(
                self.loss_by_feat_single,
                anchor_list,
                cls_scores,
                bbox_preds,
                labels_list,
                label_weights_list,
                bbox_targets_list,
                self.prior_generator.strides,
                soft_targets,
                avg_factor=avg_factor)

        avg_factor = sum(avg_factor) + 1e-6
        avg_factor = reduce_mean(avg_factor).item()
        losses_bbox = [x / avg_factor for x in losses_bbox]
        losses_dfl = [x / avg_factor for x in losses_dfl]
        return dict(
            loss_cls=losses_cls,
            loss_bbox=losses_bbox,
            loss_dfl=losses_dfl,
            loss_ld=losses_ld)
10,750
40.670543
79
py
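LDHead's loss_ld transfers the teacher's per-edge distribution over the discretized regression range (the soft_targets above) to the student's corner logits. A small standalone sketch of temperature-scaled KL distillation on such corner distributions follows; it is plain PyTorch with made-up values for T, reg_max and the logits, not the library's exact LocalizationDistillationLoss implementation.

import torch
import torch.nn.functional as F

reg_max, T = 16, 10.0         # number of distribution bins and temperature (illustrative)
num_pos_edges = 8             # four box edges for two positive samples in this toy case

student_logits = torch.randn(num_pos_edges, reg_max + 1)
teacher_logits = torch.randn(num_pos_edges, reg_max + 1)  # stands in for soft_targets

# Soften both distributions with the temperature, then penalize the KL
# divergence between teacher and student edge distributions.
p_teacher = F.softmax(teacher_logits / T, dim=-1)
log_p_student = F.log_softmax(student_logits / T, dim=-1)
loss_ld = F.kl_div(log_p_student, p_teacher, reduction='batchmean') * (T * T)
print(float(loss_ld))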
ERD
ERD-main/mmdet/models/dense_heads/conditional_detr_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple import torch import torch.nn as nn from mmengine.model import bias_init_with_prob from torch import Tensor from mmdet.models.layers.transformer import inverse_sigmoid from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.utils import InstanceList from .detr_head import DETRHead @MODELS.register_module() class ConditionalDETRHead(DETRHead): """Head of Conditional DETR. Conditional DETR: Conditional DETR for Fast Training Convergence. More details can be found in the `paper. <https://arxiv.org/abs/2108.06152>`_ . """ def init_weights(self): """Initialize weights of the transformer head.""" super().init_weights() # The initialization below for transformer head is very # important as we use Focal_loss for loss_cls if self.loss_cls.use_sigmoid: bias_init = bias_init_with_prob(0.01) nn.init.constant_(self.fc_cls.bias, bias_init) def forward(self, hidden_states: Tensor, references: Tensor) -> Tuple[Tensor, Tensor]: """"Forward function. Args: hidden_states (Tensor): Features from transformer decoder. If `return_intermediate_dec` is True output has shape (num_decoder_layers, bs, num_queries, dim), else has shape (1, bs, num_queries, dim) which only contains the last layer outputs. references (Tensor): References from transformer decoder, has shape (bs, num_queries, 2). Returns: tuple[Tensor]: results of head containing the following tensor. - layers_cls_scores (Tensor): Outputs from the classification head, shape (num_decoder_layers, bs, num_queries, cls_out_channels). Note cls_out_channels should include background. - layers_bbox_preds (Tensor): Sigmoid outputs from the regression head with normalized coordinate format (cx, cy, w, h), has shape (num_decoder_layers, bs, num_queries, 4). """ references_unsigmoid = inverse_sigmoid(references) layers_bbox_preds = [] for layer_id in range(hidden_states.shape[0]): tmp_reg_preds = self.fc_reg( self.activate(self.reg_ffn(hidden_states[layer_id]))) tmp_reg_preds[..., :2] += references_unsigmoid outputs_coord = tmp_reg_preds.sigmoid() layers_bbox_preds.append(outputs_coord) layers_bbox_preds = torch.stack(layers_bbox_preds) layers_cls_scores = self.fc_cls(hidden_states) return layers_cls_scores, layers_bbox_preds def loss(self, hidden_states: Tensor, references: Tensor, batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection head on the features of the upstream network. Args: hidden_states (Tensor): Features from the transformer decoder, has shape (num_decoder_layers, bs, num_queries, dim). references (Tensor): References from the transformer decoder, has shape (num_decoder_layers, bs, num_queries, 2). batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: dict: A dictionary of loss components. """ batch_gt_instances = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) outs = self(hidden_states, references) loss_inputs = outs + (batch_gt_instances, batch_img_metas) losses = self.loss_by_feat(*loss_inputs) return losses def loss_and_predict( self, hidden_states: Tensor, references: Tensor, batch_data_samples: SampleList) -> Tuple[dict, InstanceList]: """Perform forward propagation of the head, then calculate loss and predictions from the features and data samples. 
Over-write because img_metas are needed as inputs for bbox_head. Args: hidden_states (Tensor): Features from the transformer decoder, has shape (num_decoder_layers, bs, num_queries, dim). references (Tensor): References from the transformer decoder, has shape (num_decoder_layers, bs, num_queries, 2). batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. Returns: tuple: The return value is a tuple contains: - losses: (dict[str, Tensor]): A dictionary of loss components. - predictions (list[:obj:`InstanceData`]): Detection results of each image after the post process. """ batch_gt_instances = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) outs = self(hidden_states, references) loss_inputs = outs + (batch_gt_instances, batch_img_metas) losses = self.loss_by_feat(*loss_inputs) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas) return losses, predictions def predict(self, hidden_states: Tensor, references: Tensor, batch_data_samples: SampleList, rescale: bool = True) -> InstanceList: """Perform forward propagation of the detection head and predict detection results on the features of the upstream network. Over-write because img_metas are needed as inputs for bbox_head. Args: hidden_states (Tensor): Features from the transformer decoder, has shape (num_decoder_layers, bs, num_queries, dim). references (Tensor): References from the transformer decoder, has shape (num_decoder_layers, bs, num_queries, 2). batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to True. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. """ batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] last_layer_hidden_state = hidden_states[-1].unsqueeze(0) outs = self(last_layer_hidden_state, references) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return predictions
7,186
41.526627
79
py
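ConditionalDETRHead adds the decoder's 2-d reference points back to the predicted box centers in logit space: inverse_sigmoid(references) is added to the first two regression channels before the final sigmoid. Below is a minimal reimplementation of that step in plain PyTorch; the local inverse_sigmoid is a stand-in for mmdet.models.layers.transformer.inverse_sigmoid, and the batch/query sizes are arbitrary.

import torch

def inverse_sigmoid(x: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    """Numerically stable logit, the inverse of sigmoid."""
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)

bs, num_queries = 2, 5
references = torch.rand(bs, num_queries, 2)   # normalized reference points (cx, cy)
reg_preds = torch.randn(bs, num_queries, 4)   # raw (cx, cy, w, h) regression output

# Predict centers as offsets from the reference points in logit space,
# then squash everything back to normalized [0, 1] coordinates.
reg_preds[..., :2] += inverse_sigmoid(references)
bbox_preds = reg_preds.sigmoid()
print(bbox_preds.shape)   # torch.Size([2, 5, 4])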
ERD
ERD-main/mmdet/models/dense_heads/ssd_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Optional, Sequence, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptInstanceList from ..losses import smooth_l1_loss from ..task_modules.samplers import PseudoSampler from ..utils import multi_apply from .anchor_head import AnchorHead # TODO: add loss evaluator for SSD @MODELS.register_module() class SSDHead(AnchorHead): """Implementation of `SSD head <https://arxiv.org/abs/1512.02325>`_ Args: num_classes (int): Number of categories excluding the background category. in_channels (Sequence[int]): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Defaults to 0. feat_channels (int): Number of hidden channels when stacked_convs > 0. Defaults to 256. use_depthwise (bool): Whether to use DepthwiseSeparableConv. Defaults to False. conv_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct and config conv layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct and config norm layer. Defaults to None. act_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct and config activation layer. Defaults to None. anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor generator. bbox_coder (:obj:`ConfigDict` or dict): Config of bounding box coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Defaults to False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. train_cfg (:obj:`ConfigDict` or dict, Optional): Training config of anchor head. test_cfg (:obj:`ConfigDict` or dict, Optional): Testing config of anchor head. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], Optional): Initialization config dict. 
""" # noqa: W605 def __init__( self, num_classes: int = 80, in_channels: Sequence[int] = (512, 1024, 512, 256, 256, 256), stacked_convs: int = 0, feat_channels: int = 256, use_depthwise: bool = False, conv_cfg: Optional[ConfigType] = None, norm_cfg: Optional[ConfigType] = None, act_cfg: Optional[ConfigType] = None, anchor_generator: ConfigType = dict( type='SSDAnchorGenerator', scale_major=False, input_size=300, strides=[8, 16, 32, 64, 100, 300], ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), basesize_ratio_range=(0.1, 0.9)), bbox_coder: ConfigType = dict( type='DeltaXYWHBBoxCoder', clip_border=True, target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0], ), reg_decoded_bbox: bool = False, train_cfg: Optional[ConfigType] = None, test_cfg: Optional[ConfigType] = None, init_cfg: MultiConfig = dict( type='Xavier', layer='Conv2d', distribution='uniform', bias=0) ) -> None: super(AnchorHead, self).__init__(init_cfg=init_cfg) self.num_classes = num_classes self.in_channels = in_channels self.stacked_convs = stacked_convs self.feat_channels = feat_channels self.use_depthwise = use_depthwise self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.cls_out_channels = num_classes + 1 # add background class self.prior_generator = TASK_UTILS.build(anchor_generator) # Usually the numbers of anchors for each level are the same # except SSD detectors. So it is an int in the most dense # heads but a list of int in SSDHead self.num_base_priors = self.prior_generator.num_base_priors self._init_layers() self.bbox_coder = TASK_UTILS.build(bbox_coder) self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = False self.cls_focal_loss = False self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) if self.train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler(context=self) def _init_layers(self) -> None: """Initialize layers of the head.""" self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() # TODO: Use registry to choose ConvModule type conv = DepthwiseSeparableConvModule \ if self.use_depthwise else ConvModule for channel, num_base_priors in zip(self.in_channels, self.num_base_priors): cls_layers = [] reg_layers = [] in_channel = channel # build stacked conv tower, not used in default ssd for i in range(self.stacked_convs): cls_layers.append( conv( in_channel, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) reg_layers.append( conv( in_channel, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) in_channel = self.feat_channels # SSD-Lite head if self.use_depthwise: cls_layers.append( ConvModule( in_channel, in_channel, 3, padding=1, groups=in_channel, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) reg_layers.append( ConvModule( in_channel, in_channel, 3, padding=1, groups=in_channel, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)) cls_layers.append( nn.Conv2d( in_channel, num_base_priors * self.cls_out_channels, kernel_size=1 if self.use_depthwise else 3, padding=0 if self.use_depthwise else 1)) reg_layers.append( nn.Conv2d( in_channel, num_base_priors * 4, kernel_size=1 if self.use_depthwise else 3, padding=0 if self.use_depthwise else 1)) self.cls_convs.append(nn.Sequential(*cls_layers)) 
self.reg_convs.append(nn.Sequential(*reg_layers)) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple[list[Tensor], list[Tensor]]: A tuple of cls_scores list and bbox_preds list. - cls_scores (list[Tensor]): Classification scores for all scale \ levels, each is a 4D-tensor, the channels number is \ num_anchors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale \ levels, each is a 4D-tensor, the channels number is \ num_anchors * 4. """ cls_scores = [] bbox_preds = [] for feat, reg_conv, cls_conv in zip(x, self.reg_convs, self.cls_convs): cls_scores.append(cls_conv(feat)) bbox_preds.append(reg_conv(feat)) return cls_scores, bbox_preds def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, anchor: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, avg_factor: int) -> Tuple[Tensor, Tensor]: """Compute loss of a single image. Args: cls_score (Tensor): Box scores for eachimage Has shape (num_total_anchors, num_classes). bbox_pred (Tensor): Box energies / deltas for each image level with shape (num_total_anchors, 4). anchors (Tensor): Box reference for each scale level with shape (num_total_anchors, 4). labels (Tensor): Labels of each anchors with shape (num_total_anchors,). label_weights (Tensor): Label weights of each anchor with shape (num_total_anchors,) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (num_total_anchors, 4). bbox_weights (Tensor): BBox regression loss weights of each anchor with shape (num_total_anchors, 4). avg_factor (int): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: Tuple[Tensor, Tensor]: A tuple of cls loss and bbox loss of one feature map. """ loss_cls_all = F.cross_entropy( cls_score, labels, reduction='none') * label_weights # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( as_tuple=False).reshape(-1) neg_inds = (labels == self.num_classes).nonzero( as_tuple=False).view(-1) num_pos_samples = pos_inds.size(0) num_neg_samples = self.train_cfg['neg_pos_ratio'] * num_pos_samples if num_neg_samples > neg_inds.size(0): num_neg_samples = neg_inds.size(0) topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) loss_cls_pos = loss_cls_all[pos_inds].sum() loss_cls_neg = topk_loss_cls_neg.sum() loss_cls = (loss_cls_pos + loss_cls_neg) / avg_factor if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. bbox_pred = self.bbox_coder.decode(anchor, bbox_pred) loss_bbox = smooth_l1_loss( bbox_pred, bbox_targets, bbox_weights, beta=self.train_cfg['smoothl1_beta'], avg_factor=avg_factor) return loss_cls[None], loss_bbox def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, List[Tensor]]: """Compute losses of the head. 
Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, list[Tensor]]: A dictionary of loss components. the dict has components below: - loss_cls (list[Tensor]): A list containing each feature map \ classification loss. - loss_bbox (list[Tensor]): A list containing each feature map \ regression loss. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, unmap_outputs=True) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets num_images = len(batch_img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) losses_cls, losses_bbox = multi_apply( self.loss_by_feat_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, avg_factor=avg_factor) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
15,517
41.749311
79
py
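SSDHead's classification loss relies on online hard negative mining: negative anchors are ranked by their cross-entropy loss and only the hardest neg_pos_ratio * num_pos of them contribute. The sketch below reproduces that selection with random stand-in labels and losses; for simplicity it divides by the number of positives, whereas the head above divides by avg_factor.

import torch

num_classes, num_anchors, neg_pos_ratio = 20, 100, 3
labels = torch.randint(0, num_classes + 1, (num_anchors,))  # num_classes == background
loss_cls_all = torch.rand(num_anchors)                      # per-anchor CE loss (stand-in)

pos_inds = (labels < num_classes).nonzero(as_tuple=False).reshape(-1)
neg_inds = (labels == num_classes).nonzero(as_tuple=False).reshape(-1)

# Keep at most neg_pos_ratio hard negatives per positive anchor.
num_neg = min(neg_pos_ratio * pos_inds.numel(), neg_inds.numel())
topk_neg_loss, _ = loss_cls_all[neg_inds].topk(num_neg)

loss_cls = (loss_cls_all[pos_inds].sum() + topk_neg_loss.sum()) / max(pos_inds.numel(), 1)
print(float(loss_cls))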
ERD
ERD-main/mmdet/models/dense_heads/fcos_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Tuple import torch import torch.nn as nn from mmcv.cnn import Scale from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptInstanceList, RangeType, reduce_mean) from ..utils import multi_apply from .anchor_free_head import AnchorFreeHead INF = 1e8 @MODELS.register_module() class FCOSHead(AnchorFreeHead): """Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_. The FCOS head does not use anchor boxes. Instead bounding boxes are predicted at each pixel and a centerness measure is used to suppress low-quality predictions. Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training tricks used in official repo, which will bring remarkable mAP gains of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for more detail. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. strides (Sequence[int] or Sequence[Tuple[int, int]]): Strides of points in multiple feature levels. Defaults to (4, 8, 16, 32, 64). regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple level points. center_sampling (bool): If true, use center sampling. Defaults to False. center_sample_radius (float): Radius of center sampling. Defaults to 1.5. norm_on_bbox (bool): If true, normalize the regression targets with FPN strides. Defaults to False. centerness_on_reg (bool): If true, position centerness on the regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042. Defaults to False. conv_bias (bool or str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Defaults to "auto". loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. loss_centerness (:obj:`ConfigDict`, or dict): Config of centerness loss. norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and config norm layer. Defaults to ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict]): Initialization config dict. 
Example: >>> self = FCOSHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred, centerness = self.forward(feats) >>> assert len(cls_score) == len(self.scales) """ # noqa: E501 def __init__(self, num_classes: int, in_channels: int, regress_ranges: RangeType = ((-1, 64), (64, 128), (128, 256), (256, 512), (512, INF)), center_sampling: bool = False, center_sample_radius: float = 1.5, norm_on_bbox: bool = False, centerness_on_reg: bool = False, loss_cls: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox: ConfigType = dict(type='IoULoss', loss_weight=1.0), loss_centerness: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), norm_cfg: ConfigType = dict( type='GN', num_groups=32, requires_grad=True), init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)), **kwargs) -> None: self.regress_ranges = regress_ranges self.center_sampling = center_sampling self.center_sample_radius = center_sample_radius self.norm_on_bbox = norm_on_bbox self.centerness_on_reg = centerness_on_reg super().__init__( num_classes=num_classes, in_channels=in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, norm_cfg=norm_cfg, init_cfg=init_cfg, **kwargs) self.loss_centerness = MODELS.build(loss_centerness) def _init_layers(self) -> None: """Initialize layers of the head.""" super()._init_layers() self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) def forward( self, x: Tuple[Tensor] ) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of each level outputs. - cls_scores (list[Tensor]): Box scores for each scale level, \ each is a 4D-tensor, the channel number is \ num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each \ scale level, each is a 4D-tensor, the channel number is \ num_points * 4. - centernesses (list[Tensor]): centerness for each scale level, \ each is a 4D-tensor, the channel number is num_points * 1. """ return multi_apply(self.forward_single, x, self.scales, self.strides) def forward_single(self, x: Tensor, scale: Scale, stride: int) -> Tuple[Tensor, Tensor, Tensor]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps, only used to normalize the bbox prediction when self.norm_on_bbox is True. Returns: tuple: scores for each class, bbox predictions and centerness predictions of input feature maps. """ cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x) if self.centerness_on_reg: centerness = self.conv_centerness(reg_feat) else: centerness = self.conv_centerness(cls_feat) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() if self.norm_on_bbox: # bbox_pred needed for gradient computation has been modified # by F.relu(bbox_pred) when run with PyTorch 1.10. 
So replace # F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) if not self.training: bbox_pred *= stride else: bbox_pred = bbox_pred.exp() return cls_score, bbox_pred, centerness def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], centernesses: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. centernesses (list[Tensor]): centerness for each scale level, each is a 4D-tensor, the channel number is num_points * 1. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_scores) == len(bbox_preds) == len(centernesses) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) labels, bbox_targets = self.get_targets(all_level_points, batch_gt_instances) num_imgs = cls_scores[0].size(0) # flatten cls_scores, bbox_preds and centerness flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_centerness = [ centerness.permute(0, 2, 3, 1).reshape(-1) for centerness in centernesses ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_centerness = torch.cat(flatten_centerness) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)).nonzero().reshape(-1) num_pos = torch.tensor( len(pos_inds), dtype=torch.float, device=bbox_preds[0].device) num_pos = max(reduce_mean(num_pos), 1.0) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos) pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_centerness = flatten_centerness[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_centerness_targets = self.centerness_target(pos_bbox_targets) # centerness weighted iou loss centerness_denorm = max( reduce_mean(pos_centerness_targets.sum().detach()), 1e-6) if len(pos_inds) > 0: pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = self.bbox_coder.decode( pos_points, pos_bbox_preds) pos_decoded_target_preds = self.bbox_coder.decode( pos_points, pos_bbox_targets) loss_bbox = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds, weight=pos_centerness_targets, 
avg_factor=centerness_denorm) loss_centerness = self.loss_centerness( pos_centerness, pos_centerness_targets, avg_factor=num_pos) else: loss_bbox = pos_bbox_preds.sum() loss_centerness = pos_centerness.sum() return dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_centerness=loss_centerness) def get_targets( self, points: List[Tensor], batch_gt_instances: InstanceList ) -> Tuple[List[Tensor], List[Tensor]]: """Compute regression, classification and centerness targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple: Targets of each level. - concat_lvl_labels (list[Tensor]): Labels of each level. - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ level. """ assert len(points) == len(self.regress_ranges) num_levels = len(points) # expand regress ranges to align with points expanded_regress_ranges = [ points[i].new_tensor(self.regress_ranges[i])[None].expand_as( points[i]) for i in range(num_levels) ] # concat all levels points and regress ranges concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl num_points = [center.size(0) for center in points] # get labels and bbox_targets of each image labels_list, bbox_targets_list = multi_apply( self._get_targets_single, batch_gt_instances, points=concat_points, regress_ranges=concat_regress_ranges, num_points_per_lvl=num_points) # split to per img, per level labels_list = [labels.split(num_points, 0) for labels in labels_list] bbox_targets_list = [ bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list ] # concat per level image concat_lvl_labels = [] concat_lvl_bbox_targets = [] for i in range(num_levels): concat_lvl_labels.append( torch.cat([labels[i] for labels in labels_list])) bbox_targets = torch.cat( [bbox_targets[i] for bbox_targets in bbox_targets_list]) if self.norm_on_bbox: bbox_targets = bbox_targets / self.strides[i] concat_lvl_bbox_targets.append(bbox_targets) return concat_lvl_labels, concat_lvl_bbox_targets def _get_targets_single( self, gt_instances: InstanceData, points: Tensor, regress_ranges: Tensor, num_points_per_lvl: List[int]) -> Tuple[Tensor, Tensor]: """Compute regression and classification targets for a single image.""" num_points = points.size(0) num_gts = len(gt_instances) gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels if num_gts == 0: return gt_labels.new_full((num_points,), self.num_classes), \ gt_bboxes.new_zeros((num_points, 4)) areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( gt_bboxes[:, 3] - gt_bboxes[:, 1]) # TODO: figure out why these two are different # areas = areas[None].expand(num_points, num_gts) areas = areas[None].repeat(num_points, 1) regress_ranges = regress_ranges[:, None, :].expand( num_points, num_gts, 2) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None].expand(num_points, num_gts) ys = ys[:, None].expand(num_points, num_gts) left = xs - gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) if self.center_sampling: # condition1: inside a `center bbox` radius = self.center_sample_radius center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 center_ys = (gt_bboxes[..., 1] + 
gt_bboxes[..., 3]) / 2 center_gts = torch.zeros_like(gt_bboxes) stride = center_xs.new_zeros(center_xs.shape) # project the points on current lvl back to the `original` sizes lvl_begin = 0 for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): lvl_end = lvl_begin + num_points_lvl stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius lvl_begin = lvl_end x_mins = center_xs - stride y_mins = center_ys - stride x_maxs = center_xs + stride y_maxs = center_ys + stride center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0], x_mins, gt_bboxes[..., 0]) center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1], y_mins, gt_bboxes[..., 1]) center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2], gt_bboxes[..., 2], x_maxs) center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3], gt_bboxes[..., 3], y_maxs) cb_dist_left = xs - center_gts[..., 0] cb_dist_right = center_gts[..., 2] - xs cb_dist_top = ys - center_gts[..., 1] cb_dist_bottom = center_gts[..., 3] - ys center_bbox = torch.stack( (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 else: # condition1: inside a gt bbox inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 # condition2: limit the regression range for each location max_regress_distance = bbox_targets.max(-1)[0] inside_regress_range = ( (max_regress_distance >= regress_ranges[..., 0]) & (max_regress_distance <= regress_ranges[..., 1])) # if there are still more than one objects for a location, # we choose the one with minimal area areas[inside_gt_bbox_mask == 0] = INF areas[inside_regress_range == 0] = INF min_area, min_area_inds = areas.min(dim=1) labels = gt_labels[min_area_inds] labels[min_area == INF] = self.num_classes # set as BG bbox_targets = bbox_targets[range(num_points), min_area_inds] return labels, bbox_targets def centerness_target(self, pos_bbox_targets: Tensor) -> Tensor: """Compute centerness targets. Args: pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape (num_pos, 4) Returns: Tensor: Centerness target. """ # only calculate pos centerness targets, otherwise there may be nan left_right = pos_bbox_targets[:, [0, 2]] top_bottom = pos_bbox_targets[:, [1, 3]] if len(left_right) == 0: centerness_targets = left_right[..., 0] else: centerness_targets = ( left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) return torch.sqrt(centerness_targets)
19,856
42.546053
113
py
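FCOSHead supervises its centerness branch with sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))), computed from the (left, top, right, bottom) regression targets exactly as in centerness_target above. A standalone version with random toy targets:

import torch

def centerness_target(pos_bbox_targets: torch.Tensor) -> torch.Tensor:
    """pos_bbox_targets: (num_pos, 4) distances (left, top, right, bottom)."""
    left_right = pos_bbox_targets[:, [0, 2]]
    top_bottom = pos_bbox_targets[:, [1, 3]]
    centerness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \
                 (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
    return centerness.sqrt()

targets = torch.rand(6, 4) + 0.1   # offset avoids zero distances in this toy example
print(centerness_target(targets))  # values in (0, 1]; 1 means the point sits at the box center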
ERD
ERD-main/mmdet/models/dense_heads/lad_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional import torch from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import InstanceList, OptInstanceList from ..utils import levels_to_images, multi_apply, unpack_gt_instances from .paa_head import PAAHead @MODELS.register_module() class LADHead(PAAHead): """Label Assignment Head from the paper: `Improving Object Detection by Label Assignment Distillation <https://arxiv.org/pdf/2108.10520.pdf>`_""" def get_label_assignment( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], iou_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> tuple: """Get label assignment (from teacher). Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) iou_preds (list[Tensor]): iou_preds for each scale level with shape (N, num_anchors * 1, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: tuple: Returns a tuple containing label assignment variables. - labels (Tensor): Labels of all anchors, each with shape (num_anchors,). - labels_weight (Tensor): Label weights of all anchor. each with shape (num_anchors,). - bboxes_target (Tensor): BBox targets of all anchors. each with shape (num_anchors, 4). - bboxes_weight (Tensor): BBox weights of all anchors. each with shape (num_anchors, 4). - pos_inds_flatten (Tensor): Contains all index of positive sample in all anchor. - pos_anchors (Tensor): Positive anchors. - num_pos (int): Number of positive anchors. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, ) (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, pos_gt_index) = cls_reg_targets cls_scores = levels_to_images(cls_scores) cls_scores = [ item.reshape(-1, self.cls_out_channels) for item in cls_scores ] bbox_preds = levels_to_images(bbox_preds) bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, cls_scores, bbox_preds, labels, labels_weight, bboxes_target, bboxes_weight, pos_inds) with torch.no_grad(): reassign_labels, reassign_label_weight, \ reassign_bbox_weights, num_pos = multi_apply( self.paa_reassign, pos_losses_list, labels, labels_weight, bboxes_weight, pos_inds, pos_gt_index, anchor_list) num_pos = sum(num_pos) # convert all tensor list to a flatten tensor labels = torch.cat(reassign_labels, 0).view(-1) flatten_anchors = torch.cat( [torch.cat(item, 0) for item in anchor_list]) labels_weight = torch.cat(reassign_label_weight, 0).view(-1) bboxes_target = torch.cat(bboxes_target, 0).view(-1, bboxes_target[0].size(-1)) pos_inds_flatten = ((labels >= 0) & (labels < self.num_classes)).nonzero().reshape(-1) if num_pos: pos_anchors = flatten_anchors[pos_inds_flatten] else: pos_anchors = None label_assignment_results = (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten, pos_anchors, num_pos) return label_assignment_results def loss(self, x: List[Tensor], label_assignment_results: tuple, batch_data_samples: SampleList) -> dict: """Forward train with the available label assignment (student receives from teacher). Args: x (list[Tensor]): Features from FPN. label_assignment_results (tuple): As the outputs defined in the function `self.get_label_assignment`. batch_data_samples (list[:obj:`DetDataSample`]): The batch data samples. It usually includes information such as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`. Returns: losses: (dict[str, Tensor]): A dictionary of loss components. """ outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \ = outputs outs = self(x) loss_inputs = outs + (batch_gt_instances, batch_img_metas) losses = self.loss_by_feat( *loss_inputs, batch_gt_instances_ignore=batch_gt_instances_ignore, label_assignment_results=label_assignment_results) return losses def loss_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], iou_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, label_assignment_results: Optional[tuple] = None) -> dict: """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) iou_preds (list[Tensor]): iou_preds for each scale level with shape (N, num_anchors * 1, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. 
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. label_assignment_results (tuple, optional): As the outputs defined in the function `self.get_ label_assignment`. Returns: dict[str, Tensor]: A dictionary of loss gmm_assignment. """ (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten, pos_anchors, num_pos) = label_assignment_results cls_scores = levels_to_images(cls_scores) cls_scores = [ item.reshape(-1, self.cls_out_channels) for item in cls_scores ] bbox_preds = levels_to_images(bbox_preds) bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] iou_preds = levels_to_images(iou_preds) iou_preds = [item.reshape(-1, 1) for item in iou_preds] # convert all tensor list to a flatten tensor cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) losses_cls = self.loss_cls( cls_scores, labels, labels_weight, avg_factor=max(num_pos, len(batch_img_metas))) # avoid num_pos=0 if num_pos: pos_bbox_pred = self.bbox_coder.decode( pos_anchors, bbox_preds[pos_inds_flatten]) pos_bbox_target = bboxes_target[pos_inds_flatten] iou_target = bbox_overlaps( pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) losses_iou = self.loss_centerness( iou_preds[pos_inds_flatten], iou_target.unsqueeze(-1), avg_factor=num_pos) losses_bbox = self.loss_bbox( pos_bbox_pred, pos_bbox_target, avg_factor=num_pos) else: losses_iou = iou_preds.sum() * 0 losses_bbox = bbox_preds.sum() * 0 return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
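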
9,938
42.784141
79
py
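In LADHead the label assignment is computed by the teacher via get_label_assignment and consumed by the student's loss, and the IoU branch regresses the aligned IoU between decoded positive predictions and their targets (bbox_overlaps(..., is_aligned=True) above). The following plain-PyTorch helper sketches that aligned-IoU target with toy boxes; it is not the library's bbox_overlaps.

import torch

def aligned_iou(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
    """Element-wise IoU of two (N, 4) sets of (x1, y1, x2, y2) boxes."""
    lt = torch.max(boxes1[:, :2], boxes2[:, :2])
    rb = torch.min(boxes1[:, 2:], boxes2[:, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    return inter / (area1 + area2 - inter).clamp(min=1e-6)

pred = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
target = torch.tensor([[0., 0., 10., 10.], [0., 0., 10., 10.]])
print(aligned_iou(pred, target))   # tensor([1.0000, 0.1429])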
ERD
ERD-main/mmdet/models/dense_heads/yolo_head.py
# Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) 2019 Western Digital Corporation or its affiliates. import copy import warnings from typing import List, Optional, Sequence, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, is_norm from mmengine.model import bias_init_with_prob, constant_init, normal_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList) from ..task_modules.samplers import PseudoSampler from ..utils import filter_scores_and_topk, images_to_levels, multi_apply from .base_dense_head import BaseDenseHead @MODELS.register_module() class YOLOV3Head(BaseDenseHead): """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767. Args: num_classes (int): The number of object classes (w/o background) in_channels (Sequence[int]): Number of input channels per scale. out_channels (Sequence[int]): The number of output channels per scale before the final 1x1 layer. Default: (1024, 512, 256). anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor generator. bbox_coder (:obj:`ConfigDict` or dict): Config of bounding box coder. featmap_strides (Sequence[int]): The stride of each scale. Should be in descending order. Defaults to (32, 16, 8). one_hot_smoother (float): Set a non-zero value to enable label-smooth Defaults to 0. conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for convolution layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and config norm layer. Defaults to dict(type='BN', requires_grad=True). act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer. Defaults to dict(type='LeakyReLU', negative_slope=0.1). loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_conf (:obj:`ConfigDict` or dict): Config of confidence loss. loss_xy (:obj:`ConfigDict` or dict): Config of xy coordinate loss. loss_wh (:obj:`ConfigDict` or dict): Config of wh coordinate loss. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of YOLOV3 head. Defaults to None. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of YOLOV3 head. Defaults to None. 
""" def __init__(self, num_classes: int, in_channels: Sequence[int], out_channels: Sequence[int] = (1024, 512, 256), anchor_generator: ConfigType = dict( type='YOLOAnchorGenerator', base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]], strides=[32, 16, 8]), bbox_coder: ConfigType = dict(type='YOLOBBoxCoder'), featmap_strides: Sequence[int] = (32, 16, 8), one_hot_smoother: float = 0., conv_cfg: OptConfigType = None, norm_cfg: ConfigType = dict(type='BN', requires_grad=True), act_cfg: ConfigType = dict( type='LeakyReLU', negative_slope=0.1), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_conf: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_xy: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_wh: ConfigType = dict(type='MSELoss', loss_weight=1.0), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None) -> None: super().__init__(init_cfg=None) # Check params assert (len(in_channels) == len(out_channels) == len(featmap_strides)) self.num_classes = num_classes self.in_channels = in_channels self.out_channels = out_channels self.featmap_strides = featmap_strides self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) if train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], context=self) else: self.sampler = PseudoSampler() self.one_hot_smoother = one_hot_smoother self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.bbox_coder = TASK_UTILS.build(bbox_coder) self.prior_generator = TASK_UTILS.build(anchor_generator) self.loss_cls = MODELS.build(loss_cls) self.loss_conf = MODELS.build(loss_conf) self.loss_xy = MODELS.build(loss_xy) self.loss_wh = MODELS.build(loss_wh) self.num_base_priors = self.prior_generator.num_base_priors[0] assert len( self.prior_generator.num_base_priors) == len(featmap_strides) self._init_layers() @property def num_levels(self) -> int: """int: number of feature map levels""" return len(self.featmap_strides) @property def num_attrib(self) -> int: """int: number of attributes in pred_map, bboxes (4) + objectness (1) + num_classes""" return 5 + self.num_classes def _init_layers(self) -> None: """initialize conv layers in YOLOv3 head.""" self.convs_bridge = nn.ModuleList() self.convs_pred = nn.ModuleList() for i in range(self.num_levels): conv_bridge = ConvModule( self.in_channels[i], self.out_channels[i], 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) conv_pred = nn.Conv2d(self.out_channels[i], self.num_base_priors * self.num_attrib, 1) self.convs_bridge.append(conv_bridge) self.convs_pred.append(conv_pred) def init_weights(self) -> None: """initialize weights.""" for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) # Use prior in model initialization to improve stability for conv_pred, stride in zip(self.convs_pred, self.featmap_strides): bias = conv_pred.bias.reshape(self.num_base_priors, -1) # init objectness with prior of 8 objects per feature map # refer to https://github.com/ultralytics/yolov3 nn.init.constant_(bias.data[:, 4], bias_init_with_prob(8 / (608 / stride)**2)) nn.init.constant_(bias.data[:, 5:], bias_init_with_prob(0.01)) def forward(self, x: Tuple[Tensor, ...]) -> tuple: """Forward features from the upstream network. 
Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple[Tensor]: A tuple of multi-level predication map, each is a 4D-tensor of shape (batch_size, 5+num_classes, height, width). """ assert len(x) == self.num_levels pred_maps = [] for i in range(self.num_levels): feat = x[i] feat = self.convs_bridge[i](feat) pred_map = self.convs_pred[i](feat) pred_maps.append(pred_map) return tuple(pred_maps), def predict_by_feat(self, pred_maps: Sequence[Tensor], batch_img_metas: Optional[List[dict]], cfg: OptConfigType = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. It has been accelerated since PR #5991. Args: pred_maps (Sequence[Tensor]): Raw predictions for a batch of images. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (:obj:`ConfigDict` or dict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(pred_maps) == self.num_levels cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) num_imgs = len(batch_img_metas) featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=pred_maps[0].device) flatten_preds = [] flatten_strides = [] for pred, stride in zip(pred_maps, self.featmap_strides): pred = pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_attrib) pred[..., :2].sigmoid_() flatten_preds.append(pred) flatten_strides.append( pred.new_tensor(stride).expand(pred.size(1))) flatten_preds = torch.cat(flatten_preds, dim=1) flatten_bbox_preds = flatten_preds[..., :4] flatten_objectness = flatten_preds[..., 4].sigmoid() flatten_cls_scores = flatten_preds[..., 5:].sigmoid() flatten_anchors = torch.cat(mlvl_anchors) flatten_strides = torch.cat(flatten_strides) flatten_bboxes = self.bbox_coder.decode(flatten_anchors, flatten_bbox_preds, flatten_strides.unsqueeze(-1)) results_list = [] for (bboxes, scores, objectness, img_meta) in zip(flatten_bboxes, flatten_cls_scores, flatten_objectness, batch_img_metas): # Filtering out all predictions with conf < conf_thr conf_thr = cfg.get('conf_thr', -1) if conf_thr > 0: conf_inds = objectness >= conf_thr bboxes = bboxes[conf_inds, :] scores = scores[conf_inds, :] objectness = objectness[conf_inds] score_thr = cfg.get('score_thr', 0) nms_pre = cfg.get('nms_pre', -1) scores, labels, keep_idxs, _ = filter_scores_and_topk( scores, score_thr, nms_pre) results = InstanceData( scores=scores, labels=labels, bboxes=bboxes[keep_idxs], score_factors=objectness[keep_idxs], ) results = self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) results_list.append(results) return results_list def loss_by_feat( self, pred_maps: Sequence[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: 
"""Calculate the loss based on the features extracted by the detection head. Args: pred_maps (list[Tensor]): Prediction map for each scale level, shape (N, num_anchors * num_attrib, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. """ num_imgs = len(batch_img_metas) device = pred_maps[0][0].device featmap_sizes = [ pred_maps[i].shape[-2:] for i in range(self.num_levels) ] mlvl_anchors = self.prior_generator.grid_priors( featmap_sizes, device=device) anchor_list = [mlvl_anchors for _ in range(num_imgs)] responsible_flag_list = [] for img_id in range(num_imgs): responsible_flag_list.append( self.responsible_flags(featmap_sizes, batch_gt_instances[img_id].bboxes, device)) target_maps_list, neg_maps_list = self.get_targets( anchor_list, responsible_flag_list, batch_gt_instances) losses_cls, losses_conf, losses_xy, losses_wh = multi_apply( self.loss_by_feat_single, pred_maps, target_maps_list, neg_maps_list) return dict( loss_cls=losses_cls, loss_conf=losses_conf, loss_xy=losses_xy, loss_wh=losses_wh) def loss_by_feat_single(self, pred_map: Tensor, target_map: Tensor, neg_map: Tensor) -> tuple: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: pred_map (Tensor): Raw predictions for a single level. target_map (Tensor): The Ground-Truth target for a single level. neg_map (Tensor): The negative masks for a single level. Returns: tuple: loss_cls (Tensor): Classification loss. loss_conf (Tensor): Confidence loss. loss_xy (Tensor): Regression loss of x, y coordinate. loss_wh (Tensor): Regression loss of w, h coordinate. """ num_imgs = len(pred_map) pred_map = pred_map.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_attrib) neg_mask = neg_map.float() pos_mask = target_map[..., 4] pos_and_neg_mask = neg_mask + pos_mask pos_mask = pos_mask.unsqueeze(dim=-1) if torch.max(pos_and_neg_mask) > 1.: warnings.warn('There is overlap between pos and neg sample.') pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.) pred_xy = pred_map[..., :2] pred_wh = pred_map[..., 2:4] pred_conf = pred_map[..., 4] pred_label = pred_map[..., 5:] target_xy = target_map[..., :2] target_wh = target_map[..., 2:4] target_conf = target_map[..., 4] target_label = target_map[..., 5:] loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask) loss_conf = self.loss_conf( pred_conf, target_conf, weight=pos_and_neg_mask) loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask) loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask) return loss_cls, loss_conf, loss_xy, loss_wh def get_targets(self, anchor_list: List[List[Tensor]], responsible_flag_list: List[List[Tensor]], batch_gt_instances: List[InstanceData]) -> tuple: """Compute target maps for anchors in multiple images. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_total_anchors, 4). responsible_flag_list (list[list[Tensor]]): Multi level responsible flags of each image. 
Each element is a tensor of shape (num_total_anchors, ) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple: Usually returns a tuple containing learning targets. - target_map_list (list[Tensor]): Target map of each level. - neg_map_list (list[Tensor]): Negative map of each level. """ num_imgs = len(anchor_list) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] results = multi_apply(self._get_targets_single, anchor_list, responsible_flag_list, batch_gt_instances) all_target_maps, all_neg_maps = results assert num_imgs == len(all_target_maps) == len(all_neg_maps) target_maps_list = images_to_levels(all_target_maps, num_level_anchors) neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors) return target_maps_list, neg_maps_list def _get_targets_single(self, anchors: List[Tensor], responsible_flags: List[Tensor], gt_instances: InstanceData) -> tuple: """Generate matching bounding box prior and converted GT. Args: anchors (List[Tensor]): Multi-level anchors of the image. responsible_flags (List[Tensor]): Multi-level responsible flags of anchors gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. Returns: tuple: target_map (Tensor): Predication target map of each scale level, shape (num_total_anchors, 5+num_classes) neg_map (Tensor): Negative map of each scale level, shape (num_total_anchors,) """ gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels anchor_strides = [] for i in range(len(anchors)): anchor_strides.append( torch.tensor(self.featmap_strides[i], device=gt_bboxes.device).repeat(len(anchors[i]))) concat_anchors = torch.cat(anchors) concat_responsible_flags = torch.cat(responsible_flags) anchor_strides = torch.cat(anchor_strides) assert len(anchor_strides) == len(concat_anchors) == \ len(concat_responsible_flags) pred_instances = InstanceData( priors=concat_anchors, responsible_flags=concat_responsible_flags) assign_result = self.assigner.assign(pred_instances, gt_instances) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) target_map = concat_anchors.new_zeros( concat_anchors.size(0), self.num_attrib) target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode( sampling_result.pos_priors, sampling_result.pos_gt_bboxes, anchor_strides[sampling_result.pos_inds]) target_map[sampling_result.pos_inds, 4] = 1 gt_labels_one_hot = F.one_hot( gt_labels, num_classes=self.num_classes).float() if self.one_hot_smoother != 0: # label smooth gt_labels_one_hot = gt_labels_one_hot * ( 1 - self.one_hot_smoother ) + self.one_hot_smoother / self.num_classes target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[ sampling_result.pos_assigned_gt_inds] neg_map = concat_anchors.new_zeros( concat_anchors.size(0), dtype=torch.uint8) neg_map[sampling_result.neg_inds] = 1 return target_map, neg_map def responsible_flags(self, featmap_sizes: List[tuple], gt_bboxes: Tensor, device: str) -> List[Tensor]: """Generate responsible anchor flags of grid cells in multiple scales. Args: featmap_sizes (List[tuple]): List of feature map sizes in multiple feature levels. gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). device (str): Device where the anchors will be put on. 
Return: List[Tensor]: responsible flags of anchors in multiple level """ assert self.num_levels == len(featmap_sizes) multi_level_responsible_flags = [] for i in range(self.num_levels): anchor_stride = self.prior_generator.strides[i] feat_h, feat_w = featmap_sizes[i] gt_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device) gt_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device) gt_grid_x = torch.floor(gt_cx / anchor_stride[0]).long() gt_grid_y = torch.floor(gt_cy / anchor_stride[1]).long() # row major indexing gt_bboxes_grid_idx = gt_grid_y * feat_w + gt_grid_x responsible_grid = torch.zeros( feat_h * feat_w, dtype=torch.uint8, device=device) responsible_grid[gt_bboxes_grid_idx] = 1 responsible_grid = responsible_grid[:, None].expand( responsible_grid.size(0), self.prior_generator.num_base_priors[i]).contiguous().view(-1) multi_level_responsible_flags.append(responsible_grid) return multi_level_responsible_flags
22,709
42.011364
79
py
ERD
ERD-main/mmdet/models/dense_heads/centripetal_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import DeformConv2d from mmengine.model import normal_init from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import (ConfigType, InstanceList, OptInstanceList, OptMultiConfig) from ..utils import multi_apply from .corner_head import CornerHead @MODELS.register_module() class CentripetalHead(CornerHead): """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection. CentripetalHead inherits from :class:`CornerHead`. It removes the embedding branch and adds guiding shift and centripetal shift branches. More details can be found in the `paper <https://arxiv.org/abs/2003.09119>`_ . Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. num_feat_levels (int): Levels of feature from the previous module. 2 for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104 outputs the final feature and intermediate supervision feature and HourglassNet-52 only outputs the final feature. Defaults to 2. corner_emb_channels (int): Channel of embedding vector. Defaults to 1. train_cfg (:obj:`ConfigDict` or dict, optional): Training config. Useless in CornerHead, but we keep this variable for SingleStageDetector. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of CornerHead. loss_heatmap (:obj:`ConfigDict` or dict): Config of corner heatmap loss. Defaults to GaussianFocalLoss. loss_embedding (:obj:`ConfigDict` or dict): Config of corner embedding loss. Defaults to AssociativeEmbeddingLoss. loss_offset (:obj:`ConfigDict` or dict): Config of corner offset loss. Defaults to SmoothL1Loss. loss_guiding_shift (:obj:`ConfigDict` or dict): Config of guiding shift loss. Defaults to SmoothL1Loss. loss_centripetal_shift (:obj:`ConfigDict` or dict): Config of centripetal shift loss. Defaults to SmoothL1Loss. init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. """ def __init__(self, *args, centripetal_shift_channels: int = 2, guiding_shift_channels: int = 2, feat_adaption_conv_kernel: int = 3, loss_guiding_shift: ConfigType = dict( type='SmoothL1Loss', beta=1.0, loss_weight=0.05), loss_centripetal_shift: ConfigType = dict( type='SmoothL1Loss', beta=1.0, loss_weight=1), init_cfg: OptMultiConfig = None, **kwargs) -> None: assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' assert centripetal_shift_channels == 2, ( 'CentripetalHead only support centripetal_shift_channels == 2') self.centripetal_shift_channels = centripetal_shift_channels assert guiding_shift_channels == 2, ( 'CentripetalHead only support guiding_shift_channels == 2') self.guiding_shift_channels = guiding_shift_channels self.feat_adaption_conv_kernel = feat_adaption_conv_kernel super().__init__(*args, init_cfg=init_cfg, **kwargs) self.loss_guiding_shift = MODELS.build(loss_guiding_shift) self.loss_centripetal_shift = MODELS.build(loss_centripetal_shift) def _init_centripetal_layers(self) -> None: """Initialize centripetal layers. Including feature adaption deform convs (feat_adaption), deform offset prediction convs (dcn_off), guiding shift (guiding_shift) and centripetal shift ( centripetal_shift). Each branch has two parts: prefix `tl_` for top-left and `br_` for bottom-right. 
""" self.tl_feat_adaption = nn.ModuleList() self.br_feat_adaption = nn.ModuleList() self.tl_dcn_offset = nn.ModuleList() self.br_dcn_offset = nn.ModuleList() self.tl_guiding_shift = nn.ModuleList() self.br_guiding_shift = nn.ModuleList() self.tl_centripetal_shift = nn.ModuleList() self.br_centripetal_shift = nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_feat_adaption.append( DeformConv2d(self.in_channels, self.in_channels, self.feat_adaption_conv_kernel, 1, 1)) self.br_feat_adaption.append( DeformConv2d(self.in_channels, self.in_channels, self.feat_adaption_conv_kernel, 1, 1)) self.tl_guiding_shift.append( self._make_layers( out_channels=self.guiding_shift_channels, in_channels=self.in_channels)) self.br_guiding_shift.append( self._make_layers( out_channels=self.guiding_shift_channels, in_channels=self.in_channels)) self.tl_dcn_offset.append( ConvModule( self.guiding_shift_channels, self.feat_adaption_conv_kernel**2 * self.guiding_shift_channels, 1, bias=False, act_cfg=None)) self.br_dcn_offset.append( ConvModule( self.guiding_shift_channels, self.feat_adaption_conv_kernel**2 * self.guiding_shift_channels, 1, bias=False, act_cfg=None)) self.tl_centripetal_shift.append( self._make_layers( out_channels=self.centripetal_shift_channels, in_channels=self.in_channels)) self.br_centripetal_shift.append( self._make_layers( out_channels=self.centripetal_shift_channels, in_channels=self.in_channels)) def _init_layers(self) -> None: """Initialize layers for CentripetalHead. Including two parts: CornerHead layers and CentripetalHead layers """ super()._init_layers() # using _init_layers in CornerHead self._init_centripetal_layers() def init_weights(self) -> None: super().init_weights() for i in range(self.num_feat_levels): normal_init(self.tl_feat_adaption[i], std=0.01) normal_init(self.br_feat_adaption[i], std=0.01) normal_init(self.tl_dcn_offset[i].conv, std=0.1) normal_init(self.br_dcn_offset[i].conv, std=0.1) _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]] _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]] _ = [ x.conv.reset_parameters() for x in self.tl_centripetal_shift[i] ] _ = [ x.conv.reset_parameters() for x in self.br_centripetal_shift[i] ] def forward_single(self, x: Tensor, lvl_ind: int) -> List[Tensor]: """Forward feature of a single level. Args: x (Tensor): Feature of a single level. lvl_ind (int): Level index of current feature. Returns: tuple[Tensor]: A tuple of CentripetalHead's output for current feature level. Containing the following Tensors: - tl_heat (Tensor): Predicted top-left corner heatmap. - br_heat (Tensor): Predicted bottom-right corner heatmap. - tl_off (Tensor): Predicted top-left offset heatmap. - br_off (Tensor): Predicted bottom-right offset heatmap. - tl_guiding_shift (Tensor): Predicted top-left guiding shift heatmap. - br_guiding_shift (Tensor): Predicted bottom-right guiding shift heatmap. - tl_centripetal_shift (Tensor): Predicted top-left centripetal shift heatmap. - br_centripetal_shift (Tensor): Predicted bottom-right centripetal shift heatmap. 
""" tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super( ).forward_single( x, lvl_ind, return_pool=True) tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool) br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool) tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach()) br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach()) tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool, tl_dcn_offset) br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool, br_dcn_offset) tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind]( tl_feat_adaption) br_centripetal_shift = self.br_centripetal_shift[lvl_ind]( br_feat_adaption) result_list = [ tl_heat, br_heat, tl_off, br_off, tl_guiding_shift, br_guiding_shift, tl_centripetal_shift, br_centripetal_shift ] return result_list def loss_by_feat( self, tl_heats: List[Tensor], br_heats: List[Tensor], tl_offs: List[Tensor], br_offs: List[Tensor], tl_guiding_shifts: List[Tensor], br_guiding_shifts: List[Tensor], tl_centripetal_shifts: List[Tensor], br_centripetal_shifts: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each level with shape (N, guiding_shift_channels, H, W). br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for each level with shape (N, guiding_shift_channels, H, W). tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shifts (list[Tensor]): Bottom-right centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. Containing the following losses: - det_loss (list[Tensor]): Corner keypoint losses of all feature levels. - off_loss (list[Tensor]): Corner offset losses of all feature levels. - guiding_loss (list[Tensor]): Guiding shift losses of all feature levels. - centripetal_loss (list[Tensor]): Centripetal shift losses of all feature levels. 
""" gt_bboxes = [ gt_instances.bboxes for gt_instances in batch_gt_instances ] gt_labels = [ gt_instances.labels for gt_instances in batch_gt_instances ] targets = self.get_targets( gt_bboxes, gt_labels, tl_heats[-1].shape, batch_img_metas[0]['batch_input_shape'], with_corner_emb=self.with_corner_emb, with_guiding_shift=True, with_centripetal_shift=True) mlvl_targets = [targets for _ in range(self.num_feat_levels)] [det_losses, off_losses, guiding_losses, centripetal_losses ] = multi_apply(self.loss_by_feat_single, tl_heats, br_heats, tl_offs, br_offs, tl_guiding_shifts, br_guiding_shifts, tl_centripetal_shifts, br_centripetal_shifts, mlvl_targets) loss_dict = dict( det_loss=det_losses, off_loss=off_losses, guiding_loss=guiding_losses, centripetal_loss=centripetal_losses) return loss_dict def loss_by_feat_single(self, tl_hmp: Tensor, br_hmp: Tensor, tl_off: Tensor, br_off: Tensor, tl_guiding_shift: Tensor, br_guiding_shift: Tensor, tl_centripetal_shift: Tensor, br_centripetal_shift: Tensor, targets: dict) -> Tuple[Tensor, ...]: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: tl_hmp (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_hmp (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). tl_guiding_shift (Tensor): Top-left guiding shift for current level with shape (N, guiding_shift_channels, H, W). br_guiding_shift (Tensor): Bottom-right guiding shift for current level with shape (N, guiding_shift_channels, H, W). tl_centripetal_shift (Tensor): Top-left centripetal shift for current level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shift (Tensor): Bottom-right centripetal shift for current level with shape (N, centripetal_shift_channels, H, W). targets (dict): Corner target generated by `get_targets`. Returns: tuple[torch.Tensor]: Losses of the head's different branches containing the following losses: - det_loss (Tensor): Corner keypoint loss. - off_loss (Tensor): Corner offset loss. - guiding_loss (Tensor): Guiding shift loss. - centripetal_loss (Tensor): Centripetal shift loss. """ targets['corner_embedding'] = None det_loss, _, _, off_loss = super().loss_by_feat_single( tl_hmp, br_hmp, None, None, tl_off, br_off, targets) gt_tl_guiding_shift = targets['topleft_guiding_shift'] gt_br_guiding_shift = targets['bottomright_guiding_shift'] gt_tl_centripetal_shift = targets['topleft_centripetal_shift'] gt_br_centripetal_shift = targets['bottomright_centripetal_shift'] gt_tl_heatmap = targets['topleft_heatmap'] gt_br_heatmap = targets['bottomright_heatmap'] # We only compute the offset loss at the real corner position. # The value of real corner would be 1 in heatmap ground truth. # The mask is computed in class agnostic mode and its shape is # batch * 1 * width * height. 
tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_tl_heatmap) br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_br_heatmap) # Guiding shift loss tl_guiding_loss = self.loss_guiding_shift( tl_guiding_shift, gt_tl_guiding_shift, tl_mask, avg_factor=tl_mask.sum()) br_guiding_loss = self.loss_guiding_shift( br_guiding_shift, gt_br_guiding_shift, br_mask, avg_factor=br_mask.sum()) guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0 # Centripetal shift loss tl_centripetal_loss = self.loss_centripetal_shift( tl_centripetal_shift, gt_tl_centripetal_shift, tl_mask, avg_factor=tl_mask.sum()) br_centripetal_loss = self.loss_centripetal_shift( br_centripetal_shift, gt_br_centripetal_shift, br_mask, avg_factor=br_mask.sum()) centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0 return det_loss, off_loss, guiding_loss, centripetal_loss def predict_by_feat(self, tl_heats: List[Tensor], br_heats: List[Tensor], tl_offs: List[Tensor], br_offs: List[Tensor], tl_guiding_shifts: List[Tensor], br_guiding_shifts: List[Tensor], tl_centripetal_shifts: List[Tensor], br_centripetal_shifts: List[Tensor], batch_img_metas: Optional[List[dict]] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each level with shape (N, guiding_shift_channels, H, W). Useless in this function, we keep this arg because it's the raw output from CentripetalHead. br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for each level with shape (N, guiding_shift_channels, H, W). Useless in this function, we keep this arg because it's the raw output from CentripetalHead. tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). br_centripetal_shifts (list[Tensor]): Bottom-right centripetal shifts for each level with shape (N, centripetal_shift_channels, H, W). batch_img_metas (list[dict], optional): Batch image meta info. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len( batch_img_metas) result_list = [] for img_id in range(len(batch_img_metas)): result_list.append( self._predict_by_feat_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], batch_img_metas[img_id], tl_emb=None, br_emb=None, tl_centripetal_shift=tl_centripetal_shifts[-1][ img_id:img_id + 1, :], br_centripetal_shift=br_centripetal_shifts[-1][ img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) return result_list
21,678
46.128261
79
py
ERD
ERD-main/mmdet/models/dense_heads/paa_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import numpy as np import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList) from ..layers import multiclass_nms from ..utils import levels_to_images, multi_apply from . import ATSSHead EPS = 1e-12 try: import sklearn.mixture as skm except ImportError: skm = None @MODELS.register_module() class PAAHead(ATSSHead): """Head of PAAAssignment: Probabilistic Anchor Assignment with IoU Prediction for Object Detection. Code is modified from the `official github repo <https://github.com/kkhoot/PAA/blob/master/paa_core /modeling/rpn/paa/loss.py>`_. More details can be found in the `paper <https://arxiv.org/abs/2007.08103>`_ . Args: topk (int): Select topk samples with smallest loss in each level. score_voting (bool): Whether to use score voting in post-process. covariance_type : String describing the type of covariance parameters to be used in :class:`sklearn.mixture.GaussianMixture`. It must be one of: - 'full': each component has its own general covariance matrix - 'tied': all components share the same general covariance matrix - 'diag': each component has its own diagonal covariance matrix - 'spherical': each component has its own single variance Default: 'diag'. From 'full' to 'spherical', the gmm fitting process is faster yet the performance could be influenced. For most cases, 'diag' should be a good choice. """ def __init__(self, *args, topk: int = 9, score_voting: bool = True, covariance_type: str = 'diag', **kwargs): # topk used in paa reassign process self.topk = topk self.with_score_voting = score_voting self.covariance_type = covariance_type super().__init__(*args, **kwargs) def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], iou_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) iou_preds (list[Tensor]): iou_preds for each scale level with shape (N, num_anchors * 1, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss gmm_assignment. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, ) (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, pos_gt_index) = cls_reg_targets cls_scores = levels_to_images(cls_scores) cls_scores = [ item.reshape(-1, self.cls_out_channels) for item in cls_scores ] bbox_preds = levels_to_images(bbox_preds) bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] iou_preds = levels_to_images(iou_preds) iou_preds = [item.reshape(-1, 1) for item in iou_preds] pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, cls_scores, bbox_preds, labels, labels_weight, bboxes_target, bboxes_weight, pos_inds) with torch.no_grad(): reassign_labels, reassign_label_weight, \ reassign_bbox_weights, num_pos = multi_apply( self.paa_reassign, pos_losses_list, labels, labels_weight, bboxes_weight, pos_inds, pos_gt_index, anchor_list) num_pos = sum(num_pos) # convert all tensor list to a flatten tensor cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) labels = torch.cat(reassign_labels, 0).view(-1) flatten_anchors = torch.cat( [torch.cat(item, 0) for item in anchor_list]) labels_weight = torch.cat(reassign_label_weight, 0).view(-1) bboxes_target = torch.cat(bboxes_target, 0).view(-1, bboxes_target[0].size(-1)) pos_inds_flatten = ((labels >= 0) & (labels < self.num_classes)).nonzero().reshape(-1) losses_cls = self.loss_cls( cls_scores, labels, labels_weight, avg_factor=max(num_pos, len(batch_img_metas))) # avoid num_pos=0 if num_pos: pos_bbox_pred = self.bbox_coder.decode( flatten_anchors[pos_inds_flatten], bbox_preds[pos_inds_flatten]) pos_bbox_target = bboxes_target[pos_inds_flatten] iou_target = bbox_overlaps( pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) losses_iou = self.loss_centerness( iou_preds[pos_inds_flatten], iou_target.unsqueeze(-1), avg_factor=num_pos) losses_bbox = self.loss_bbox( pos_bbox_pred, pos_bbox_target, iou_target.clamp(min=EPS), avg_factor=iou_target.sum()) else: losses_iou = iou_preds.sum() * 0 losses_bbox = bbox_preds.sum() * 0 return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou) def get_pos_loss(self, anchors: List[Tensor], cls_score: Tensor, bbox_pred: Tensor, label: Tensor, label_weight: Tensor, bbox_target: dict, bbox_weight: Tensor, pos_inds: Tensor) -> Tensor: """Calculate loss of all potential positive samples obtained from first match process. Args: anchors (list[Tensor]): Anchors of each scale. cls_score (Tensor): Box scores of single image with shape (num_anchors, num_classes) bbox_pred (Tensor): Box energies / deltas of single image with shape (num_anchors, 4) label (Tensor): classification target of each anchor with shape (num_anchors,) label_weight (Tensor): Classification loss weight of each anchor with shape (num_anchors). bbox_target (dict): Regression target of each anchor with shape (num_anchors, 4). bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). pos_inds (Tensor): Index of all positive samples got from first assign process. 
Returns: Tensor: Losses of all positive samples in single image. """ if not len(pos_inds): return cls_score.new([]), anchors_all_level = torch.cat(anchors, 0) pos_scores = cls_score[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_label = label[pos_inds] pos_label_weight = label_weight[pos_inds] pos_bbox_target = bbox_target[pos_inds] pos_bbox_weight = bbox_weight[pos_inds] pos_anchors = anchors_all_level[pos_inds] pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred) # to keep loss dimension loss_cls = self.loss_cls( pos_scores, pos_label, pos_label_weight, avg_factor=1.0, reduction_override='none') loss_bbox = self.loss_bbox( pos_bbox_pred, pos_bbox_target, pos_bbox_weight, avg_factor=1.0, # keep same loss weight before reassign reduction_override='none') loss_cls = loss_cls.sum(-1) pos_loss = loss_bbox + loss_cls return pos_loss, def paa_reassign(self, pos_losses: Tensor, label: Tensor, label_weight: Tensor, bbox_weight: Tensor, pos_inds: Tensor, pos_gt_inds: Tensor, anchors: List[Tensor]) -> tuple: """Fit loss to GMM distribution and separate positive, ignore, negative samples again with GMM model. Args: pos_losses (Tensor): Losses of all positive samples in single image. label (Tensor): classification target of each anchor with shape (num_anchors,) label_weight (Tensor): Classification loss weight of each anchor with shape (num_anchors). bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). pos_inds (Tensor): Index of all positive samples got from first assign process. pos_gt_inds (Tensor): Gt_index of all positive samples got from first assign process. anchors (list[Tensor]): Anchors of each scale. Returns: tuple: Usually returns a tuple containing learning targets. - label (Tensor): classification target of each anchor after paa assign, with shape (num_anchors,) - label_weight (Tensor): Classification loss weight of each anchor after paa assign, with shape (num_anchors). - bbox_weight (Tensor): Bbox weight of each anchor with shape (num_anchors, 4). - num_pos (int): The number of positive samples after paa assign. 
""" if not len(pos_inds): return label, label_weight, bbox_weight, 0 label = label.clone() label_weight = label_weight.clone() bbox_weight = bbox_weight.clone() num_gt = pos_gt_inds.max() + 1 num_level = len(anchors) num_anchors_each_level = [item.size(0) for item in anchors] num_anchors_each_level.insert(0, 0) inds_level_interval = np.cumsum(num_anchors_each_level) pos_level_mask = [] for i in range(num_level): mask = (pos_inds >= inds_level_interval[i]) & ( pos_inds < inds_level_interval[i + 1]) pos_level_mask.append(mask) pos_inds_after_paa = [label.new_tensor([])] ignore_inds_after_paa = [label.new_tensor([])] for gt_ind in range(num_gt): pos_inds_gmm = [] pos_loss_gmm = [] gt_mask = pos_gt_inds == gt_ind for level in range(num_level): level_mask = pos_level_mask[level] level_gt_mask = level_mask & gt_mask value, topk_inds = pos_losses[level_gt_mask].topk( min(level_gt_mask.sum(), self.topk), largest=False) pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds]) pos_loss_gmm.append(value) pos_inds_gmm = torch.cat(pos_inds_gmm) pos_loss_gmm = torch.cat(pos_loss_gmm) # fix gmm need at least two sample if len(pos_inds_gmm) < 2: continue device = pos_inds_gmm.device pos_loss_gmm, sort_inds = pos_loss_gmm.sort() pos_inds_gmm = pos_inds_gmm[sort_inds] pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy() min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max() means_init = np.array([min_loss, max_loss]).reshape(2, 1) weights_init = np.array([0.5, 0.5]) precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full if self.covariance_type == 'spherical': precisions_init = precisions_init.reshape(2) elif self.covariance_type == 'diag': precisions_init = precisions_init.reshape(2, 1) elif self.covariance_type == 'tied': precisions_init = np.array([[1.0]]) if skm is None: raise ImportError('Please run "pip install sklearn" ' 'to install sklearn first.') gmm = skm.GaussianMixture( 2, weights_init=weights_init, means_init=means_init, precisions_init=precisions_init, covariance_type=self.covariance_type) gmm.fit(pos_loss_gmm) gmm_assignment = gmm.predict(pos_loss_gmm) scores = gmm.score_samples(pos_loss_gmm) gmm_assignment = torch.from_numpy(gmm_assignment).to(device) scores = torch.from_numpy(scores).to(device) pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme( gmm_assignment, scores, pos_inds_gmm) pos_inds_after_paa.append(pos_inds_temp) ignore_inds_after_paa.append(ignore_inds_temp) pos_inds_after_paa = torch.cat(pos_inds_after_paa) ignore_inds_after_paa = torch.cat(ignore_inds_after_paa) reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1) reassign_ids = pos_inds[reassign_mask] label[reassign_ids] = self.num_classes label_weight[ignore_inds_after_paa] = 0 bbox_weight[reassign_ids] = 0 num_pos = len(pos_inds_after_paa) return label, label_weight, bbox_weight, num_pos def gmm_separation_scheme(self, gmm_assignment: Tensor, scores: Tensor, pos_inds_gmm: Tensor) -> Tuple[Tensor, Tensor]: """A general separation scheme for gmm model. It separates a GMM distribution of candidate samples into three parts, 0 1 and uncertain areas, and you can implement other separation schemes by rewriting this function. Args: gmm_assignment (Tensor): The prediction of GMM which is of shape (num_samples,). The 0/1 value indicates the distribution that each sample comes from. scores (Tensor): The probability of sample coming from the fit GMM distribution. The tensor is of shape (num_samples,). pos_inds_gmm (Tensor): All the indexes of samples which are used to fit GMM model. 
The tensor is of shape (num_samples,) Returns: tuple[Tensor, Tensor]: The indices of positive and ignored samples. - pos_inds_temp (Tensor): Indices of positive samples. - ignore_inds_temp (Tensor): Indices of ignore samples. """ # The implementation is (c) in Fig.3 in origin paper instead of (b). # You can refer to issues such as # https://github.com/kkhoot/PAA/issues/8 and # https://github.com/kkhoot/PAA/issues/9. fgs = gmm_assignment == 0 pos_inds_temp = fgs.new_tensor([], dtype=torch.long) ignore_inds_temp = fgs.new_tensor([], dtype=torch.long) if fgs.nonzero().numel(): _, pos_thr_ind = scores[fgs].topk(1) pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1] ignore_inds_temp = pos_inds_gmm.new_tensor([]) return pos_inds_temp, ignore_inds_temp def get_targets(self, anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Get targets for PAA head. This method is almost the same as `AnchorHead.get_targets()`. We direct return the results from _get_targets_single instead map it to levels by images_to_levels function. Args: anchor_list (list[list[Tensor]]): Multi level anchors of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, 4). valid_flag_list (list[list[Tensor]]): Multi level valid flags of each image. The outer list indicates images, and the inner list corresponds to feature levels of the image. Each element of the inner list is a tensor of shape (num_anchors, ) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: Usually returns a tuple containing learning targets. - labels (list[Tensor]): Labels of all anchors, each with shape (num_anchors,). - label_weights (list[Tensor]): Label weights of all anchor. each with shape (num_anchors,). - bbox_targets (list[Tensor]): BBox targets of all anchors. each with shape (num_anchors, 4). - bbox_weights (list[Tensor]): BBox weights of all anchors. each with shape (num_anchors, 4). - pos_inds (list[Tensor]): Contains all index of positive sample in all anchor. - gt_inds (list[Tensor]): Contains all gt_index of positive sample in all anchor. 
""" num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs concat_anchor_list = [] concat_valid_flag_list = [] for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) concat_anchor_list.append(torch.cat(anchor_list[i])) concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs results = multi_apply( self._get_targets_single, concat_anchor_list, concat_valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds, valid_neg_inds, sampling_result) = results # Due to valid flag of anchors, we have to calculate the real pos_inds # in origin anchor set. pos_inds = [] for i, single_labels in enumerate(labels): pos_mask = (0 <= single_labels) & ( single_labels < self.num_classes) pos_inds.append(pos_mask.nonzero().view(-1)) gt_inds = [item.pos_assigned_gt_inds for item in sampling_result] return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, gt_inds) def _get_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression and classification targets for anchors in a single image. This method is same as `AnchorHead._get_targets_single()`. """ assert unmap_outputs, 'We must map outputs back to the original' \ 'set of anchors in PAAhead' return super(ATSSHead, self)._get_targets_single( flat_anchors, valid_flags, gt_instances, img_meta, gt_instances_ignore, unmap_outputs=True) def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: OptConfigType = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. This method is same as `BaseDenseHead.get_results()`. """ assert with_nms, 'PAA only supports "with_nms=True" now and it ' \ 'means PAAHead does not support ' \ 'test-time augmentation' return super().predict_by_feat( cls_scores=cls_scores, bbox_preds=bbox_preds, score_factors=score_factors, batch_img_metas=batch_img_metas, cfg=cfg, rescale=rescale, with_nms=with_nms) def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: OptConfigType = None, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factors from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (:obj:`ConfigDict` or dict, optional): Test / postprocessing configuration, if None, test_cfg would be used. 
rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cfg = self.test_cfg if cfg is None else cfg img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_score_factors = [] for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() if 0 < nms_pre < scores.shape[0]: max_scores, _ = (scores * score_factor[:, None]).sqrt().max(dim=1) _, topk_inds = max_scores.topk(nms_pre) priors = priors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] score_factor = score_factor[topk_inds] bboxes = self.bbox_coder.decode( priors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_score_factors.append(score_factor) results = InstanceData() results.bboxes = torch.cat(mlvl_bboxes) results.scores = torch.cat(mlvl_scores) results.score_factors = torch.cat(mlvl_score_factors) return self._bbox_post_process(results, cfg, rescale, with_nms, img_meta) def _bbox_post_process(self, results: InstanceData, cfg: ConfigType, rescale: bool = False, with_nms: bool = True, img_meta: Optional[dict] = None): """bbox post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Usually with_nms is False is used for aug test. Args: results (:obj:`InstaceData`): Detection instance results, each item has shape (num_bboxes, ). cfg (:obj:`ConfigDict` or dict): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. img_meta (dict, optional): Image meta info. Defaults to None. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" if rescale: results.bboxes /= results.bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = results.scores.new_zeros(results.scores.shape[0], 1) mlvl_scores = torch.cat([results.scores, padding], dim=1) mlvl_nms_scores = (mlvl_scores * results.score_factors[:, None]).sqrt() det_bboxes, det_labels = multiclass_nms( results.bboxes, mlvl_nms_scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=None) if self.with_score_voting and len(det_bboxes) > 0: det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels, results.bboxes, mlvl_nms_scores, cfg.score_thr) nms_results = InstanceData() nms_results.bboxes = det_bboxes[:, :-1] nms_results.scores = det_bboxes[:, -1] nms_results.labels = det_labels return nms_results def score_voting(self, det_bboxes: Tensor, det_labels: Tensor, mlvl_bboxes: Tensor, mlvl_nms_scores: Tensor, score_thr: float) -> Tuple[Tensor, Tensor]: """Implementation of score voting method works on each remaining boxes after NMS procedure. Args: det_bboxes (Tensor): Remaining boxes after NMS procedure, with shape (k, 5), each dimension means (x1, y1, x2, y2, score). det_labels (Tensor): The label of remaining boxes, with shape (k, 1),Labels are 0-based. mlvl_bboxes (Tensor): All boxes before the NMS procedure, with shape (num_anchors,4). mlvl_nms_scores (Tensor): The scores of all boxes which is used in the NMS procedure, with shape (num_anchors, num_class) score_thr (float): The score threshold of bboxes. Returns: tuple: Usually returns a tuple containing voting results. - det_bboxes_voted (Tensor): Remaining boxes after score voting procedure, with shape (k, 5), each dimension means (x1, y1, x2, y2, score). - det_labels_voted (Tensor): Label of remaining bboxes after voting, with shape (num_anchors,). """ candidate_mask = mlvl_nms_scores > score_thr candidate_mask_nonzeros = candidate_mask.nonzero(as_tuple=False) candidate_inds = candidate_mask_nonzeros[:, 0] candidate_labels = candidate_mask_nonzeros[:, 1] candidate_bboxes = mlvl_bboxes[candidate_inds] candidate_scores = mlvl_nms_scores[candidate_mask] det_bboxes_voted = [] det_labels_voted = [] for cls in range(self.cls_out_channels): candidate_cls_mask = candidate_labels == cls if not candidate_cls_mask.any(): continue candidate_cls_scores = candidate_scores[candidate_cls_mask] candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask] det_cls_mask = det_labels == cls det_cls_bboxes = det_bboxes[det_cls_mask].view( -1, det_bboxes.size(-1)) det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4], candidate_cls_bboxes) for det_ind in range(len(det_cls_bboxes)): single_det_ious = det_candidate_ious[det_ind] pos_ious_mask = single_det_ious > 0.01 pos_ious = single_det_ious[pos_ious_mask] pos_bboxes = candidate_cls_bboxes[pos_ious_mask] pos_scores = candidate_cls_scores[pos_ious_mask] pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) * pos_scores)[:, None] voted_box = torch.sum( pis * pos_bboxes, dim=0) / torch.sum( pis, dim=0) voted_score = det_cls_bboxes[det_ind][-1:][None, :] det_bboxes_voted.append( torch.cat((voted_box[None, :], voted_score), dim=1)) det_labels_voted.append(cls) det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0) det_labels_voted = det_labels.new_tensor(det_labels_voted) return det_bboxes_voted, det_labels_voted
33,336
44.604651
79
py
ERD
ERD-main/mmdet/models/dense_heads/retina_sepbn_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.model import bias_init_with_prob, normal_init from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import OptConfigType, OptMultiConfig from .anchor_head import AnchorHead @MODELS.register_module() class RetinaSepBNHead(AnchorHead): """"RetinaHead with separate BN. In RetinaHead, conv/norm layers are shared across different FPN levels, while in RetinaSepBNHead, conv layers are shared across different FPN levels, but BN layers are separated. """ def __init__(self, num_classes: int, num_ins: int, in_channels: int, stacked_convs: int = 4, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, init_cfg: OptMultiConfig = None, **kwargs) -> None: assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.num_ins = num_ins super().__init__( num_classes=num_classes, in_channels=in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.num_ins): cls_convs = nn.ModuleList() reg_convs = nn.ModuleList() for j in range(self.stacked_convs): chn = self.in_channels if j == 0 else self.feat_channels cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.cls_convs.append(cls_convs) self.reg_convs.append(reg_convs) for i in range(self.stacked_convs): for j in range(1, self.num_ins): self.cls_convs[j][i].conv = self.cls_convs[0][i].conv self.reg_convs[j][i].conv = self.reg_convs[0][i].conv self.retina_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.retina_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) def init_weights(self) -> None: """Initialize weights of the head.""" super().init_weights() for m in self.cls_convs[0]: normal_init(m.conv, std=0.01) for m in self.reg_convs[0]: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.retina_cls, std=0.01, bias=bias_cls) normal_init(self.retina_reg, std=0.01) def forward(self, feats: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. """ cls_scores = [] bbox_preds = [] for i, x in enumerate(feats): cls_feat = feats[i] reg_feat = feats[i] for cls_conv in self.cls_convs[i]: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs[i]: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_pred = self.retina_reg(reg_feat) cls_scores.append(cls_score) bbox_preds.append(bbox_pred) return cls_scores, bbox_preds
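A standalone sketch of the conv-sharing / BN-separating trick from ``_init_layers`` above: conv kernels are shared across FPN levels by re-pointing the ``conv`` attribute at level 0's module, while each level keeps its own norm layer (illustrative only; the two-level tower, channel sizes, and variable names are assumptions):

import torch
import torch.nn as nn
from mmcv.cnn import ConvModule

num_levels, channels = 2, 8
towers = nn.ModuleList([
    ConvModule(channels, channels, 3, padding=1, norm_cfg=dict(type='BN'))
    for _ in range(num_levels)
])
# share the conv weights across FPN levels, keep one BN per level
for lvl in range(1, num_levels):
    towers[lvl].conv = towers[0].conv

assert towers[0].conv is towers[1].conv       # a single set of conv parameters
assert towers[0].norm is not towers[1].norm   # separate BN statistics per level
outs = [towers[lvl](torch.randn(1, channels, 16, 16)) for lvl in range(num_levels)]
print([tuple(o.shape) for o in outs])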
4,841
36.828125
79
py
ERD
ERD-main/mmdet/models/dense_heads/anchor_free_head.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import abstractmethod from typing import Any, List, Sequence, Tuple, Union import torch.nn as nn from mmcv.cnn import ConvModule from numpy import ndarray from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList) from ..task_modules.prior_generators import MlvlPointGenerator from ..utils import multi_apply from .base_dense_head import BaseDenseHead StrideType = Union[Sequence[int], Sequence[Tuple[int, int]]] @MODELS.register_module() class AnchorFreeHead(BaseDenseHead): """Anchor-free head (FCOS, Fovea, RepPoints, etc.). Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. stacked_convs (int): Number of stacking convs of the head. strides (Sequence[int] or Sequence[Tuple[int, int]]): Downsample factor of each feature map. dcn_on_last_conv (bool): If true, use dcn in the last layer of towers. Defaults to False. conv_bias (bool or str): If specified as `auto`, it will be decided by the norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise False. Default: "auto". loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. Defaults 'DistancePointBBoxCoder'. conv_cfg (:obj:`ConfigDict` or dict, Optional): Config dict for convolution layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict, Optional): Config dict for normalization layer. Defaults to None. train_cfg (:obj:`ConfigDict` or dict, Optional): Training config of anchor-free head. test_cfg (:obj:`ConfigDict` or dict, Optional): Testing config of anchor-free head. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict]): Initialization config dict. """ # noqa: W605 _version = 1 def __init__( self, num_classes: int, in_channels: int, feat_channels: int = 256, stacked_convs: int = 4, strides: StrideType = (4, 8, 16, 32, 64), dcn_on_last_conv: bool = False, conv_bias: Union[bool, str] = 'auto', loss_cls: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox: ConfigType = dict(type='IoULoss', loss_weight=1.0), bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)) ) -> None: super().__init__(init_cfg=init_cfg) self.num_classes = num_classes self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.dcn_on_last_conv = dcn_on_last_conv assert conv_bias == 'auto' or isinstance(conv_bias, bool) self.conv_bias = conv_bias self.loss_cls = MODELS.build(loss_cls) self.loss_bbox = MODELS.build(loss_bbox) self.bbox_coder = TASK_UTILS.build(bbox_coder) self.prior_generator = MlvlPointGenerator(strides) # In order to keep a more general interface and be consistent with # anchor_head. 
We can think of point like one anchor self.num_base_priors = self.prior_generator.num_base_priors[0] self.train_cfg = train_cfg self.test_cfg = test_cfg self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" self._init_cls_convs() self._init_reg_convs() self._init_predictor() def _init_cls_convs(self) -> None: """Initialize classification conv layers of the head.""" self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias)) def _init_reg_convs(self) -> None: """Initialize bbox regression conv layers of the head.""" self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels if self.dcn_on_last_conv and i == self.stacked_convs - 1: conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias)) def _init_predictor(self) -> None: """Initialize predictor layers of the head.""" self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) def _load_from_state_dict(self, state_dict: dict, prefix: str, local_metadata: dict, strict: bool, missing_keys: Union[List[str], str], unexpected_keys: Union[List[str], str], error_msgs: Union[List[str], str]) -> None: """Hack some keys of the model state dict so that can load checkpoints of previous version.""" version = local_metadata.get('version', None) if version is None: # the key is different in early versions # for example, 'fcos_cls' become 'conv_cls' now bbox_head_keys = [ k for k in state_dict.keys() if k.startswith(prefix) ] ori_predictor_keys = [] new_predictor_keys = [] # e.g. 'fcos_cls' or 'fcos_reg' for key in bbox_head_keys: ori_predictor_keys.append(key) key = key.split('.') if len(key) < 2: conv_name = None elif key[1].endswith('cls'): conv_name = 'conv_cls' elif key[1].endswith('reg'): conv_name = 'conv_reg' elif key[1].endswith('centerness'): conv_name = 'conv_centerness' else: conv_name = None if conv_name is not None: key[1] = conv_name new_predictor_keys.append('.'.join(key)) else: ori_predictor_keys.pop(-1) for i in range(len(new_predictor_keys)): state_dict[new_predictor_keys[i]] = state_dict.pop( ori_predictor_keys[i]) super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually contain classification scores and bbox predictions. - cls_scores (list[Tensor]): Box scores for each scale level, \ each is a 4D-tensor, the channel number is \ num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale \ level, each is a 4D-tensor, the channel number is num_points * 4. 
""" return multi_apply(self.forward_single, x)[:2] def forward_single(self, x: Tensor) -> Tuple[Tensor, ...]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. Returns: tuple: Scores for each class, bbox predictions, features after classification and regression conv layers, some models needs these features like FCOS. """ cls_feat = x reg_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.conv_cls(cls_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.conv_reg(reg_feat) return cls_score, bbox_pred, cls_feat, reg_feat @abstractmethod def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. """ raise NotImplementedError @abstractmethod def get_targets(self, points: List[Tensor], batch_gt_instances: InstanceList) -> Any: """Compute regression, classification and centerness targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. """ raise NotImplementedError # TODO refactor aug_test def aug_test(self, aug_batch_feats: List[Tensor], aug_batch_img_metas: List[List[Tensor]], rescale: bool = False) -> List[ndarray]: """Test function with test time augmentation. Args: aug_batch_feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. aug_batch_img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[ndarray]: bbox results of each class """ return self.aug_test_bboxes( aug_batch_feats, aug_batch_img_metas, rescale=rescale)
13,049
40.037736
79
py
ERD
ERD-main/mmdet/models/dense_heads/boxinst_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List import torch import torch.nn.functional as F from mmengine import MessageHub from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import InstanceList from ..utils.misc import unfold_wo_center from .condinst_head import CondInstBboxHead, CondInstMaskHead @MODELS.register_module() class BoxInstBboxHead(CondInstBboxHead): """BoxInst box head used in https://arxiv.org/abs/2012.02310.""" def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) @MODELS.register_module() class BoxInstMaskHead(CondInstMaskHead): """BoxInst mask head used in https://arxiv.org/abs/2012.02310. This head outputs the mask for BoxInst. Args: pairwise_size (dict): The size of neighborhood for each pixel. Defaults to 3. pairwise_dilation (int): The dilation of neighborhood for each pixel. Defaults to 2. warmup_iters (int): Warmup iterations for pair-wise loss. Defaults to 10000. """ def __init__(self, *arg, pairwise_size: int = 3, pairwise_dilation: int = 2, warmup_iters: int = 10000, **kwargs) -> None: self.pairwise_size = pairwise_size self.pairwise_dilation = pairwise_dilation self.warmup_iters = warmup_iters super().__init__(*arg, **kwargs) def get_pairwise_affinity(self, mask_logits: Tensor) -> Tensor: """Compute the pairwise affinity for each pixel.""" log_fg_prob = F.logsigmoid(mask_logits).unsqueeze(1) log_bg_prob = F.logsigmoid(-mask_logits).unsqueeze(1) log_fg_prob_unfold = unfold_wo_center( log_fg_prob, kernel_size=self.pairwise_size, dilation=self.pairwise_dilation) log_bg_prob_unfold = unfold_wo_center( log_bg_prob, kernel_size=self.pairwise_size, dilation=self.pairwise_dilation) # the probability of making the same prediction: # p_i * p_j + (1 - p_i) * (1 - p_j) # we compute the the probability in log space # to avoid numerical instability log_same_fg_prob = log_fg_prob[:, :, None] + log_fg_prob_unfold log_same_bg_prob = log_bg_prob[:, :, None] + log_bg_prob_unfold # TODO: Figure out the difference between it and directly sum max_ = torch.max(log_same_fg_prob, log_same_bg_prob) log_same_prob = torch.log( torch.exp(log_same_fg_prob - max_) + torch.exp(log_same_bg_prob - max_)) + max_ return -log_same_prob[:, 0] def loss_by_feat(self, mask_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], positive_infos: InstanceList, **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mask_preds (list[Tensor]): List of predicted masks, each has shape (num_classes, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. positive_infos (List[:obj:``InstanceData``]): Information of positive samples of each image that are assigned in detection head. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert positive_infos is not None, \ 'positive_infos should not be None in `BoxInstMaskHead`' losses = dict() loss_mask_project = 0. loss_mask_pairwise = 0. num_imgs = len(mask_preds) total_pos = 0. avg_fatcor = 0. 
for idx in range(num_imgs): (mask_pred, pos_mask_targets, pos_pairwise_masks, num_pos) = \ self._get_targets_single( mask_preds[idx], batch_gt_instances[idx], positive_infos[idx]) # mask loss total_pos += num_pos if num_pos == 0 or pos_mask_targets is None: loss_project = mask_pred.new_zeros(1).mean() loss_pairwise = mask_pred.new_zeros(1).mean() avg_fatcor += 0. else: # compute the project term loss_project_x = self.loss_mask( mask_pred.max(dim=1, keepdim=True)[0], pos_mask_targets.max(dim=1, keepdim=True)[0], reduction_override='none').sum() loss_project_y = self.loss_mask( mask_pred.max(dim=2, keepdim=True)[0], pos_mask_targets.max(dim=2, keepdim=True)[0], reduction_override='none').sum() loss_project = loss_project_x + loss_project_y # compute the pairwise term pairwise_affinity = self.get_pairwise_affinity(mask_pred) avg_fatcor += pos_pairwise_masks.sum().clamp(min=1.0) loss_pairwise = (pairwise_affinity * pos_pairwise_masks).sum() loss_mask_project += loss_project loss_mask_pairwise += loss_pairwise if total_pos == 0: total_pos += 1 # avoid nan if avg_fatcor == 0: avg_fatcor += 1 # avoid nan loss_mask_project = loss_mask_project / total_pos loss_mask_pairwise = loss_mask_pairwise / avg_fatcor message_hub = MessageHub.get_current_instance() iter = message_hub.get_info('iter') warmup_factor = min(iter / float(self.warmup_iters), 1.0) loss_mask_pairwise *= warmup_factor losses.update( loss_mask_project=loss_mask_project, loss_mask_pairwise=loss_mask_pairwise) return losses def _get_targets_single(self, mask_preds: Tensor, gt_instances: InstanceData, positive_info: InstanceData): """Compute targets for predictions of single image. Args: mask_preds (Tensor): Predicted prototypes with shape (num_classes, H, W). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes``, ``labels``, and ``masks`` attributes. positive_info (:obj:`InstanceData`): Information of positive samples that are assigned in detection head. It usually contains following keys. - pos_assigned_gt_inds (Tensor): Assigner GT indexes of positive proposals, has shape (num_pos, ) - pos_inds (Tensor): Positive index of image, has shape (num_pos, ). - param_pred (Tensor): Positive param preditions with shape (num_pos, num_params). Returns: tuple: Usually returns a tuple containing learning targets. - mask_preds (Tensor): Positive predicted mask with shape (num_pos, mask_h, mask_w). - pos_mask_targets (Tensor): Positive mask targets with shape (num_pos, mask_h, mask_w). - pos_pairwise_masks (Tensor): Positive pairwise masks with shape: (num_pos, num_neighborhood, mask_h, mask_w). - num_pos (int): Positive numbers. """ gt_bboxes = gt_instances.bboxes device = gt_bboxes.device # Note that gt_masks are generated by full box # from BoxInstDataPreprocessor gt_masks = gt_instances.masks.to_tensor( dtype=torch.bool, device=device).float() # Note that pairwise_masks are generated by image color similarity # from BoxInstDataPreprocessor pairwise_masks = gt_instances.pairwise_masks pairwise_masks = pairwise_masks.to(device=device) # process with mask targets pos_assigned_gt_inds = positive_info.get('pos_assigned_gt_inds') scores = positive_info.get('scores') centernesses = positive_info.get('centernesses') num_pos = pos_assigned_gt_inds.size(0) if gt_masks.size(0) == 0 or num_pos == 0: return mask_preds, None, None, 0 # Since we're producing (near) full image masks, # it'd take too much vram to backprop on every single mask. # Thus we select only a subset. 
if (self.max_masks_to_train != -1) and \ (num_pos > self.max_masks_to_train): perm = torch.randperm(num_pos) select = perm[:self.max_masks_to_train] mask_preds = mask_preds[select] pos_assigned_gt_inds = pos_assigned_gt_inds[select] num_pos = self.max_masks_to_train elif self.topk_masks_per_img != -1: unique_gt_inds = pos_assigned_gt_inds.unique() num_inst_per_gt = max( int(self.topk_masks_per_img / len(unique_gt_inds)), 1) keep_mask_preds = [] keep_pos_assigned_gt_inds = [] for gt_ind in unique_gt_inds: per_inst_pos_inds = (pos_assigned_gt_inds == gt_ind) mask_preds_per_inst = mask_preds[per_inst_pos_inds] gt_inds_per_inst = pos_assigned_gt_inds[per_inst_pos_inds] if sum(per_inst_pos_inds) > num_inst_per_gt: per_inst_scores = scores[per_inst_pos_inds].sigmoid().max( dim=1)[0] per_inst_centerness = centernesses[ per_inst_pos_inds].sigmoid().reshape(-1, ) select = (per_inst_scores * per_inst_centerness).topk( k=num_inst_per_gt, dim=0)[1] mask_preds_per_inst = mask_preds_per_inst[select] gt_inds_per_inst = gt_inds_per_inst[select] keep_mask_preds.append(mask_preds_per_inst) keep_pos_assigned_gt_inds.append(gt_inds_per_inst) mask_preds = torch.cat(keep_mask_preds) pos_assigned_gt_inds = torch.cat(keep_pos_assigned_gt_inds) num_pos = pos_assigned_gt_inds.size(0) # Follow the origin implement start = int(self.mask_out_stride // 2) gt_masks = gt_masks[:, start::self.mask_out_stride, start::self.mask_out_stride] gt_masks = gt_masks.gt(0.5).float() pos_mask_targets = gt_masks[pos_assigned_gt_inds] pos_pairwise_masks = pairwise_masks[pos_assigned_gt_inds] pos_pairwise_masks = pos_pairwise_masks * pos_mask_targets.unsqueeze(1) return (mask_preds, pos_mask_targets, pos_pairwise_masks, num_pos)
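# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the pairwise term of
# `get_pairwise_affinity` recomputed standalone so the tensor shapes are easy
# to follow. The sizes below are assumptions; the snippet reuses this
# module's imports (`torch`, `F`, `unfold_wo_center`).
if __name__ == '__main__':
    mask_logits = torch.randn(2, 56, 56)              # (num_pos, H, W)
    log_fg = F.logsigmoid(mask_logits).unsqueeze(1)   # (num_pos, 1, H, W)
    log_bg = F.logsigmoid(-mask_logits).unsqueeze(1)
    fg_unfold = unfold_wo_center(log_fg, kernel_size=3, dilation=2)
    bg_unfold = unfold_wo_center(log_bg, kernel_size=3, dilation=2)
    # probability that a pixel and each of its 8 neighbours make the same
    # prediction, computed in log space for numerical stability
    same_fg = log_fg[:, :, None] + fg_unfold
    same_bg = log_bg[:, :, None] + bg_unfold
    max_ = torch.max(same_fg, same_bg)
    log_same = torch.log(
        torch.exp(same_fg - max_) + torch.exp(same_bg - max_)) + max_
    affinity = -log_same[:, 0]                        # (num_pos, 8, H, W)
    assert affinity.shape == (2, 8, 56, 56)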
10,963
42.335968
79
py
ERD
ERD-main/mmdet/models/dense_heads/maskformer_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d from mmengine.model import caffe2_xavier_init from mmengine.structures import InstanceData, PixelData from torch import Tensor from mmdet.models.layers.pixel_decoder import PixelDecoder from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import SampleList from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptMultiConfig, reduce_mean) from ..layers import DetrTransformerDecoder, SinePositionalEncoding from ..utils import multi_apply, preprocess_panoptic_gt from .anchor_free_head import AnchorFreeHead @MODELS.register_module() class MaskFormerHead(AnchorFreeHead): """Implements the MaskFormer head. See `Per-Pixel Classification is Not All You Need for Semantic Segmentation <https://arxiv.org/pdf/2107.06278>`_ for details. Args: in_channels (list[int]): Number of channels in the input feature map. feat_channels (int): Number of channels for feature. out_channels (int): Number of channels for output. num_things_classes (int): Number of things. num_stuff_classes (int): Number of stuff. num_queries (int): Number of query in Transformer. pixel_decoder (:obj:`ConfigDict` or dict): Config for pixel decoder. enforce_decoder_input_project (bool): Whether to add a layer to change the embed_dim of transformer encoder in pixel decoder to the embed_dim of transformer decoder. Defaults to False. transformer_decoder (:obj:`ConfigDict` or dict): Config for transformer decoder. positional_encoding (:obj:`ConfigDict` or dict): Config for transformer decoder position encoding. loss_cls (:obj:`ConfigDict` or dict): Config of the classification loss. Defaults to `CrossEntropyLoss`. loss_mask (:obj:`ConfigDict` or dict): Config of the mask loss. Defaults to `FocalLoss`. loss_dice (:obj:`ConfigDict` or dict): Config of the dice loss. Defaults to `DiceLoss`. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of MaskFormer head. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of MaskFormer head. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. Defaults to None. 
""" def __init__(self, in_channels: List[int], feat_channels: int, out_channels: int, num_things_classes: int = 80, num_stuff_classes: int = 53, num_queries: int = 100, pixel_decoder: ConfigType = ..., enforce_decoder_input_project: bool = False, transformer_decoder: ConfigType = ..., positional_encoding: ConfigType = dict( num_feats=128, normalize=True), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, class_weight=[1.0] * 133 + [0.1]), loss_mask: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=20.0), loss_dice: ConfigType = dict( type='DiceLoss', use_sigmoid=True, activate=True, naive_dice=True, loss_weight=1.0), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: OptMultiConfig = None, **kwargs) -> None: super(AnchorFreeHead, self).__init__(init_cfg=init_cfg) self.num_things_classes = num_things_classes self.num_stuff_classes = num_stuff_classes self.num_classes = self.num_things_classes + self.num_stuff_classes self.num_queries = num_queries pixel_decoder.update( in_channels=in_channels, feat_channels=feat_channels, out_channels=out_channels) self.pixel_decoder = MODELS.build(pixel_decoder) self.transformer_decoder = DetrTransformerDecoder( **transformer_decoder) self.decoder_embed_dims = self.transformer_decoder.embed_dims if type(self.pixel_decoder) == PixelDecoder and ( self.decoder_embed_dims != in_channels[-1] or enforce_decoder_input_project): self.decoder_input_proj = Conv2d( in_channels[-1], self.decoder_embed_dims, kernel_size=1) else: self.decoder_input_proj = nn.Identity() self.decoder_pe = SinePositionalEncoding(**positional_encoding) self.query_embed = nn.Embedding(self.num_queries, out_channels) self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) self.mask_embed = nn.Sequential( nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, out_channels)) self.test_cfg = test_cfg self.train_cfg = train_cfg if train_cfg: self.assigner = TASK_UTILS.build(train_cfg['assigner']) self.sampler = TASK_UTILS.build( train_cfg['sampler'], default_args=dict(context=self)) self.class_weight = loss_cls.class_weight self.loss_cls = MODELS.build(loss_cls) self.loss_mask = MODELS.build(loss_mask) self.loss_dice = MODELS.build(loss_dice) def init_weights(self) -> None: if isinstance(self.decoder_input_proj, Conv2d): caffe2_xavier_init(self.decoder_input_proj, bias=0) self.pixel_decoder.init_weights() for p in self.transformer_decoder.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) def preprocess_gt( self, batch_gt_instances: InstanceList, batch_gt_semantic_segs: List[Optional[PixelData]]) -> InstanceList: """Preprocess the ground truth for all images. Args: batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``labels``, each is ground truth labels of each bbox, with shape (num_gts, ) and ``masks``, each is ground truth masks of each instances of a image, shape (num_gts, h, w). gt_semantic_seg (list[Optional[PixelData]]): Ground truth of semantic segmentation, each with the shape (1, h, w). [0, num_thing_class - 1] means things, [num_thing_class, num_class-1] means stuff, 255 means VOID. It's None when training instance segmentation. 
Returns: list[obj:`InstanceData`]: each contains the following keys - labels (Tensor): Ground truth class indices\ for a image, with shape (n, ), n is the sum of\ number of stuff type and number of instance in a image. - masks (Tensor): Ground truth mask for a\ image, with shape (n, h, w). """ num_things_list = [self.num_things_classes] * len(batch_gt_instances) num_stuff_list = [self.num_stuff_classes] * len(batch_gt_instances) gt_labels_list = [ gt_instances['labels'] for gt_instances in batch_gt_instances ] gt_masks_list = [ gt_instances['masks'] for gt_instances in batch_gt_instances ] gt_semantic_segs = [ None if gt_semantic_seg is None else gt_semantic_seg.sem_seg for gt_semantic_seg in batch_gt_semantic_segs ] targets = multi_apply(preprocess_panoptic_gt, gt_labels_list, gt_masks_list, gt_semantic_segs, num_things_list, num_stuff_list) labels, masks = targets batch_gt_instances = [ InstanceData(labels=label, masks=mask) for label, mask in zip(labels, masks) ] return batch_gt_instances def get_targets( self, cls_scores_list: List[Tensor], mask_preds_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], return_sampling_results: bool = False ) -> Tuple[List[Union[Tensor, int]]]: """Compute classification and mask targets for all images for a decoder layer. Args: cls_scores_list (list[Tensor]): Mask score logits from a single decoder layer for all images. Each with shape (num_queries, cls_out_channels). mask_preds_list (list[Tensor]): Mask logits from a single decoder layer for all images. Each with shape (num_queries, h, w). batch_gt_instances (list[obj:`InstanceData`]): each contains ``labels`` and ``masks``. batch_img_metas (list[dict]): List of image meta information. return_sampling_results (bool): Whether to return the sampling results. Defaults to False. Returns: tuple: a tuple containing the following targets. - labels_list (list[Tensor]): Labels of all images.\ Each with shape (num_queries, ). - label_weights_list (list[Tensor]): Label weights\ of all images. Each with shape (num_queries, ). - mask_targets_list (list[Tensor]): Mask targets of\ all images. Each with shape (num_queries, h, w). - mask_weights_list (list[Tensor]): Mask weights of\ all images. Each with shape (num_queries, ). - avg_factor (int): Average factor that is used to average\ the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `MaskPseudoSampler`, `avg_factor` is usually equal to the number of positive priors. additional_returns: This function enables user-defined returns from `self._get_targets_single`. These returns are currently refined to properties at each feature map (i.e. having HxW dimension). The results will be concatenated after the end. """ results = multi_apply(self._get_targets_single, cls_scores_list, mask_preds_list, batch_gt_instances, batch_img_metas) (labels_list, label_weights_list, mask_targets_list, mask_weights_list, pos_inds_list, neg_inds_list, sampling_results_list) = results[:7] rest_results = list(results[7:]) avg_factor = sum( [results.avg_factor for results in sampling_results_list]) res = (labels_list, label_weights_list, mask_targets_list, mask_weights_list, avg_factor) if return_sampling_results: res = res + (sampling_results_list) return res + tuple(rest_results) def _get_targets_single(self, cls_score: Tensor, mask_pred: Tensor, gt_instances: InstanceData, img_meta: dict) -> Tuple[Tensor]: """Compute classification and mask targets for one image. 
Args: cls_score (Tensor): Mask score logits from a single decoder layer for one image. Shape (num_queries, cls_out_channels). mask_pred (Tensor): Mask logits for a single decoder layer for one image. Shape (num_queries, h, w). gt_instances (:obj:`InstanceData`): It contains ``labels`` and ``masks``. img_meta (dict): Image informtation. Returns: tuple: a tuple containing the following for one image. - labels (Tensor): Labels of each image. shape (num_queries, ). - label_weights (Tensor): Label weights of each image. shape (num_queries, ). - mask_targets (Tensor): Mask targets of each image. shape (num_queries, h, w). - mask_weights (Tensor): Mask weights of each image. shape (num_queries, ). - pos_inds (Tensor): Sampled positive indices for each image. - neg_inds (Tensor): Sampled negative indices for each image. - sampling_result (:obj:`SamplingResult`): Sampling results. """ gt_masks = gt_instances.masks gt_labels = gt_instances.labels target_shape = mask_pred.shape[-2:] if gt_masks.shape[0] > 0: gt_masks_downsampled = F.interpolate( gt_masks.unsqueeze(1).float(), target_shape, mode='nearest').squeeze(1).long() else: gt_masks_downsampled = gt_masks pred_instances = InstanceData(scores=cls_score, masks=mask_pred) downsampled_gt_instances = InstanceData( labels=gt_labels, masks=gt_masks_downsampled) # assign and sample assign_result = self.assigner.assign( pred_instances=pred_instances, gt_instances=downsampled_gt_instances, img_meta=img_meta) sampling_result = self.sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds # label target labels = gt_labels.new_full((self.num_queries, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] label_weights = gt_labels.new_ones(self.num_queries) # mask target mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] mask_weights = mask_pred.new_zeros((self.num_queries, )) mask_weights[pos_inds] = 1.0 return (labels, label_weights, mask_targets, mask_weights, pos_inds, neg_inds, sampling_result) def loss_by_feat(self, all_cls_scores: Tensor, all_mask_preds: Tensor, batch_gt_instances: List[InstanceData], batch_img_metas: List[dict]) -> Dict[str, Tensor]: """Loss function. Args: all_cls_scores (Tensor): Classification scores for all decoder layers with shape (num_decoder, batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. all_mask_preds (Tensor): Mask scores for all decoder layers with shape (num_decoder, batch_size, num_queries, h, w). batch_gt_instances (list[obj:`InstanceData`]): each contains ``labels`` and ``masks``. batch_img_metas (list[dict]): List of image meta information. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" num_dec_layers = len(all_cls_scores) batch_gt_instances_list = [ batch_gt_instances for _ in range(num_dec_layers) ] img_metas_list = [batch_img_metas for _ in range(num_dec_layers)] losses_cls, losses_mask, losses_dice = multi_apply( self._loss_by_feat_single, all_cls_scores, all_mask_preds, batch_gt_instances_list, img_metas_list) loss_dict = dict() # loss from the last decoder layer loss_dict['loss_cls'] = losses_cls[-1] loss_dict['loss_mask'] = losses_mask[-1] loss_dict['loss_dice'] = losses_dice[-1] # loss from other decoder layers num_dec_layer = 0 for loss_cls_i, loss_mask_i, loss_dice_i in zip( losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]): loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i num_dec_layer += 1 return loss_dict def _loss_by_feat_single(self, cls_scores: Tensor, mask_preds: Tensor, batch_gt_instances: List[InstanceData], batch_img_metas: List[dict]) -> Tuple[Tensor]: """Loss function for outputs from a single decoder layer. Args: cls_scores (Tensor): Mask score logits from a single decoder layer for all images. Shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. mask_preds (Tensor): Mask logits for a pixel decoder for all images. Shape (batch_size, num_queries, h, w). batch_gt_instances (list[obj:`InstanceData`]): each contains ``labels`` and ``masks``. batch_img_metas (list[dict]): List of image meta information. Returns: tuple[Tensor]: Loss components for outputs from a single decoder\ layer. """ num_imgs = cls_scores.size(0) cls_scores_list = [cls_scores[i] for i in range(num_imgs)] mask_preds_list = [mask_preds[i] for i in range(num_imgs)] (labels_list, label_weights_list, mask_targets_list, mask_weights_list, avg_factor) = self.get_targets(cls_scores_list, mask_preds_list, batch_gt_instances, batch_img_metas) # shape (batch_size, num_queries) labels = torch.stack(labels_list, dim=0) # shape (batch_size, num_queries) label_weights = torch.stack(label_weights_list, dim=0) # shape (num_total_gts, h, w) mask_targets = torch.cat(mask_targets_list, dim=0) # shape (batch_size, num_queries) mask_weights = torch.stack(mask_weights_list, dim=0) # classfication loss # shape (batch_size * num_queries, ) cls_scores = cls_scores.flatten(0, 1) labels = labels.flatten(0, 1) label_weights = label_weights.flatten(0, 1) class_weight = cls_scores.new_tensor(self.class_weight) loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=class_weight[labels].sum()) num_total_masks = reduce_mean(cls_scores.new_tensor([avg_factor])) num_total_masks = max(num_total_masks, 1) # extract positive ones # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) mask_preds = mask_preds[mask_weights > 0] target_shape = mask_targets.shape[-2:] if mask_targets.shape[0] == 0: # zero match loss_dice = mask_preds.sum() loss_mask = mask_preds.sum() return loss_cls, loss_mask, loss_dice # upsample to shape of target # shape (num_total_gts, h, w) mask_preds = F.interpolate( mask_preds.unsqueeze(1), target_shape, mode='bilinear', align_corners=False).squeeze(1) # dice loss loss_dice = self.loss_dice( mask_preds, mask_targets, avg_factor=num_total_masks) # mask loss # FocalLoss support input of shape (n, num_class) h, w = mask_preds.shape[-2:] # shape (num_total_gts, h, w) -> (num_total_gts * h * w, 1) mask_preds = mask_preds.reshape(-1, 1) # shape (num_total_gts, h, w) -> (num_total_gts * h * w) mask_targets = 
mask_targets.reshape(-1) # target is (1 - mask_targets) !!! loss_mask = self.loss_mask( mask_preds, 1 - mask_targets, avg_factor=num_total_masks * h * w) return loss_cls, loss_mask, loss_dice def forward(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> Tuple[Tensor]: """Forward function. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple[Tensor]: a tuple contains two elements. - all_cls_scores (Tensor): Classification scores for each\ scale level. Each is a 4D-tensor with shape\ (num_decoder, batch_size, num_queries, cls_out_channels).\ Note `cls_out_channels` should includes background. - all_mask_preds (Tensor): Mask scores for each decoder\ layer. Each with shape (num_decoder, batch_size,\ num_queries, h, w). """ batch_img_metas = [ data_sample.metainfo for data_sample in batch_data_samples ] batch_size = len(batch_img_metas) input_img_h, input_img_w = batch_img_metas[0]['batch_input_shape'] padding_mask = x[-1].new_ones((batch_size, input_img_h, input_img_w), dtype=torch.float32) for i in range(batch_size): img_h, img_w = batch_img_metas[i]['img_shape'] padding_mask[i, :img_h, :img_w] = 0 padding_mask = F.interpolate( padding_mask.unsqueeze(1), size=x[-1].shape[-2:], mode='nearest').to(torch.bool).squeeze(1) # when backbone is swin, memory is output of last stage of swin. # when backbone is r50, memory is output of tranformer encoder. mask_features, memory = self.pixel_decoder(x, batch_img_metas) pos_embed = self.decoder_pe(padding_mask) memory = self.decoder_input_proj(memory) # shape (batch_size, c, h, w) -> (batch_size, h*w, c) memory = memory.flatten(2).permute(0, 2, 1) pos_embed = pos_embed.flatten(2).permute(0, 2, 1) # shape (batch_size, h * w) padding_mask = padding_mask.flatten(1) # shape = (num_queries, embed_dims) query_embed = self.query_embed.weight # shape = (batch_size, num_queries, embed_dims) query_embed = query_embed.unsqueeze(0).repeat(batch_size, 1, 1) target = torch.zeros_like(query_embed) # shape (num_decoder, num_queries, batch_size, embed_dims) out_dec = self.transformer_decoder( query=target, key=memory, value=memory, query_pos=query_embed, key_pos=pos_embed, key_padding_mask=padding_mask) # cls_scores all_cls_scores = self.cls_embed(out_dec) # mask_preds mask_embed = self.mask_embed(out_dec) all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed, mask_features) return all_cls_scores, all_mask_preds def loss( self, x: Tuple[Tensor], batch_data_samples: SampleList, ) -> Dict[str, Tensor]: """Perform forward propagation and loss calculation of the panoptic head on the features of the upstream network. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. 
Returns: dict[str, Tensor]: a dictionary of loss components """ batch_img_metas = [] batch_gt_instances = [] batch_gt_semantic_segs = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) if 'gt_sem_seg' in data_sample: batch_gt_semantic_segs.append(data_sample.gt_sem_seg) else: batch_gt_semantic_segs.append(None) # forward all_cls_scores, all_mask_preds = self(x, batch_data_samples) # preprocess ground truth batch_gt_instances = self.preprocess_gt(batch_gt_instances, batch_gt_semantic_segs) # loss losses = self.loss_by_feat(all_cls_scores, all_mask_preds, batch_gt_instances, batch_img_metas) return losses def predict(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> Tuple[Tensor]: """Test without augmentaton. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple[Tensor]: A tuple contains two tensors. - mask_cls_results (Tensor): Mask classification logits,\ shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. - mask_pred_results (Tensor): Mask logits, shape \ (batch_size, num_queries, h, w). """ batch_img_metas = [ data_sample.metainfo for data_sample in batch_data_samples ] all_cls_scores, all_mask_preds = self(x, batch_data_samples) mask_cls_results = all_cls_scores[-1] mask_pred_results = all_mask_preds[-1] # upsample masks img_shape = batch_img_metas[0]['batch_input_shape'] mask_pred_results = F.interpolate( mask_pred_results, size=(img_shape[0], img_shape[1]), mode='bilinear', align_corners=False) return mask_cls_results, mask_pred_results
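# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the shape algebra of
# the final mask projection in `forward` above. All sizes are assumptions
# chosen only for the demonstration.
if __name__ == '__main__':
    num_dec, bs, num_queries, c, h, w = 6, 2, 100, 256, 32, 32
    mask_embed_demo = torch.rand(num_dec, bs, num_queries, c)   # 'lbqc'
    mask_features_demo = torch.rand(bs, c, h, w)                # 'bchw'
    # each decoder layer's query embeddings are dotted with the per-pixel
    # mask features, yielding one mask logit map per query per layer
    all_mask_preds_demo = torch.einsum('lbqc,bchw->lbqhw', mask_embed_demo,
                                       mask_features_demo)
    assert all_mask_preds_demo.shape == (num_dec, bs, num_queries, h, w)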
26,604
43.194352
79
py
ERD
ERD-main/mmdet/models/dense_heads/pisa_ssd_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Union import torch from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import InstanceList, OptInstanceList from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p from ..utils import multi_apply from .ssd_head import SSDHead # TODO: add loss evaluator for SSD @MODELS.register_module() class PISASSDHead(SSDHead): """Implementation of `PISA SSD head <https://arxiv.org/abs/1904.04821>`_ Args: num_classes (int): Number of categories excluding the background category. in_channels (Sequence[int]): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Defaults to 0. feat_channels (int): Number of hidden channels when stacked_convs > 0. Defaults to 256. use_depthwise (bool): Whether to use DepthwiseSeparableConv. Defaults to False. conv_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct and config conv layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct and config norm layer. Defaults to None. act_cfg (:obj:`ConfigDict` or dict, Optional): Dictionary to construct and config activation layer. Defaults to None. anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor generator. bbox_coder (:obj:`ConfigDict` or dict): Config of bounding box coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Defaults to False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. train_cfg (:obj:`ConfigDict` or dict, Optional): Training config of anchor head. test_cfg (:obj:`ConfigDict` or dict, Optional): Testing config of anchor head. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], Optional): Initialization config dict. """ # noqa: W605 def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Union[List[Tensor], Tensor]]: """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Union[List[Tensor], Tensor]]: A dictionary of loss components. the dict has components below: - loss_cls (list[Tensor]): A list containing each feature map \ classification loss. - loss_bbox (list[Tensor]): A list containing each feature map \ regression loss. - loss_carl (Tensor): The loss of CARL. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, unmap_outputs=False, return_sampling_results=True) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor, sampling_results_list) = cls_reg_targets num_images = len(batch_img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) isr_cfg = self.train_cfg.get('isr', None) all_targets = (all_labels.view(-1), all_label_weights.view(-1), all_bbox_targets.view(-1, 4), all_bbox_weights.view(-1, 4)) # apply ISR-P if isr_cfg is not None: all_targets = isr_p( all_cls_scores.view(-1, all_cls_scores.size(-1)), all_bbox_preds.view(-1, 4), all_targets, torch.cat(all_anchors), sampling_results_list, loss_cls=CrossEntropyLoss(), bbox_coder=self.bbox_coder, **self.train_cfg['isr'], num_class=self.num_classes) (new_labels, new_label_weights, new_bbox_targets, new_bbox_weights) = all_targets all_labels = new_labels.view(all_labels.shape) all_label_weights = new_label_weights.view(all_label_weights.shape) all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape) all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape) # add CARL loss carl_loss_cfg = self.train_cfg.get('carl', None) if carl_loss_cfg is not None: loss_carl = carl_loss( all_cls_scores.view(-1, all_cls_scores.size(-1)), all_targets[0], all_bbox_preds.view(-1, 4), all_targets[2], SmoothL1Loss(beta=1.), **self.train_cfg['carl'], avg_factor=avg_factor, num_class=self.num_classes) # check NaN and Inf assert torch.isfinite(all_cls_scores).all().item(), \ 'classification scores become infinite or NaN!' assert torch.isfinite(all_bbox_preds).all().item(), \ 'bbox predications become infinite or NaN!' losses_cls, losses_bbox = multi_apply( self.loss_by_feat_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, avg_factor=avg_factor) loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) if carl_loss_cfg is not None: loss_dict.update(loss_carl) return loss_dict
7,998
42.710383
79
py
ERD
ERD-main/mmdet/models/dense_heads/base_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod from typing import List, Tuple, Union from mmengine.model import BaseModule from torch import Tensor from mmdet.structures import SampleList from mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig from ..utils import unpack_gt_instances class BaseMaskHead(BaseModule, metaclass=ABCMeta): """Base class for mask heads used in One-Stage Instance Segmentation.""" def __init__(self, init_cfg: OptMultiConfig = None) -> None: super().__init__(init_cfg=init_cfg) @abstractmethod def loss_by_feat(self, *args, **kwargs): """Calculate the loss based on the features extracted by the mask head.""" pass @abstractmethod def predict_by_feat(self, *args, **kwargs): """Transform a batch of output features extracted from the head into mask results.""" pass def loss(self, x: Union[List[Tensor], Tuple[Tensor]], batch_data_samples: SampleList, positive_infos: OptInstanceList = None, **kwargs) -> dict: """Perform forward propagation and loss calculation of the mask head on the features of the upstream network. Args: x (list[Tensor] | tuple[Tensor]): Features from FPN. Each has a shape (B, C, H, W). batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. positive_infos (list[:obj:`InstanceData`], optional): Information of positive samples. Used when the label assignment is done outside the MaskHead, e.g., BboxHead in YOLACT or CondInst, etc. When the label assignment is done in MaskHead, it would be None, like SOLO or SOLOv2. All values in it should have shape (num_positive_samples, *). Returns: dict: A dictionary of loss components. """ if positive_infos is None: outs = self(x) else: outs = self(x, positive_infos) assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \ 'even if only one item is returned' outputs = unpack_gt_instances(batch_data_samples) batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \ = outputs for gt_instances, img_metas in zip(batch_gt_instances, batch_img_metas): img_shape = img_metas['batch_input_shape'] gt_masks = gt_instances.masks.pad(img_shape) gt_instances.masks = gt_masks losses = self.loss_by_feat( *outs, batch_gt_instances=batch_gt_instances, batch_img_metas=batch_img_metas, positive_infos=positive_infos, batch_gt_instances_ignore=batch_gt_instances_ignore, **kwargs) return losses def predict(self, x: Tuple[Tensor], batch_data_samples: SampleList, rescale: bool = False, results_list: OptInstanceList = None, **kwargs) -> InstanceList: """Test function without test-time augmentation. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to False. results_list (list[obj:`InstanceData`], optional): Detection results of each image after the post process. Only exist if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc. Returns: list[obj:`InstanceData`]: Instance segmentation results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance,) - labels (Tensor): Has a shape (num_instances,). - masks (Tensor): Processed mask results, has a shape (num_instances, h, w). 
""" batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] if results_list is None: outs = self(x) else: outs = self(x, results_list) results_list = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, rescale=rescale, results_list=results_list, **kwargs) return results_list
4,989
37.682171
79
py
ERD
ERD-main/mmdet/models/dense_heads/solo_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import mmcv import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.models.utils.misc import floordiv from mmdet.registry import MODELS from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType from ..layers import mask_matrix_nms from ..utils import center_of_mass, generate_coordinate, multi_apply from .base_mask_head import BaseMaskHead @MODELS.register_module() class SOLOHead(BaseMaskHead): """SOLO mask head used in `SOLO: Segmenting Objects by Locations. <https://arxiv.org/abs/1912.04488>`_ Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. Defaults to 256. stacked_convs (int): Number of stacking convs of the head. Defaults to 4. strides (tuple): Downsample factor of each feature map. scale_ranges (tuple[tuple[int, int]]): Area range of multiple level masks, in the format [(min1, max1), (min2, max2), ...]. A range of (16, 64) means the area range between (16, 64). pos_scale (float): Constant scale factor to control the center region. num_grids (list[int]): Divided image into a uniform grids, each feature map has a different grid value. The number of output channels is grid ** 2. Defaults to [40, 36, 24, 16, 12]. cls_down_index (int): The index of downsample operation in classification branch. Defaults to 0. loss_mask (dict): Config of mask loss. loss_cls (dict): Config of classification loss. norm_cfg (dict): Dictionary to construct and config norm layer. Defaults to norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). train_cfg (dict): Training config of head. test_cfg (dict): Testing config of head. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__( self, num_classes: int, in_channels: int, feat_channels: int = 256, stacked_convs: int = 4, strides: tuple = (4, 8, 16, 32, 64), scale_ranges: tuple = ((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), pos_scale: float = 0.2, num_grids: list = [40, 36, 24, 16, 12], cls_down_index: int = 0, loss_mask: ConfigType = dict( type='DiceLoss', use_sigmoid=True, loss_weight=3.0), loss_cls: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), norm_cfg: ConfigType = dict( type='GN', num_groups=32, requires_grad=True), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: MultiConfig = [ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ] ) -> None: super().__init__(init_cfg=init_cfg) self.num_classes = num_classes self.cls_out_channels = self.num_classes self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.num_grids = num_grids # number of FPN feats self.num_levels = len(strides) assert self.num_levels == len(scale_ranges) == len(num_grids) self.scale_ranges = scale_ranges self.pos_scale = pos_scale self.cls_down_index = cls_down_index self.loss_cls = MODELS.build(loss_cls) self.loss_mask = MODELS.build(loss_mask) self.norm_cfg = norm_cfg self.init_cfg = init_cfg self.train_cfg = train_cfg self.test_cfg = test_cfg self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" self.mask_convs = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels + 2 if i == 0 else self.feat_channels self.mask_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.conv_mask_list = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list.append( nn.Conv2d(self.feat_channels, num_grid**2, 1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def resize_feats(self, x: Tuple[Tensor]) -> List[Tensor]: """Downsample the first feat and upsample last feat in feats. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: list[Tensor]: Features after resizing, each is a 4D-tensor. """ out = [] for i in range(len(x)): if i == 0: out.append( F.interpolate(x[0], scale_factor=0.5, mode='bilinear')) elif i == len(x) - 1: out.append( F.interpolate( x[i], size=x[i - 1].shape[-2:], mode='bilinear')) else: out.append(x[i]) return out def forward(self, x: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and mask prediction. - mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. Each element in the list has shape (batch_size, num_grids**2 ,h ,w). - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). 
""" assert len(x) == self.num_levels feats = self.resize_feats(x) mlvl_mask_preds = [] mlvl_cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat = torch.cat([mask_feat, coord_feat], 1) for mask_layer in (self.mask_convs): mask_feat = mask_layer(mask_feat) mask_feat = F.interpolate( mask_feat, scale_factor=2, mode='bilinear') mask_preds = self.conv_mask_list[i](mask_feat) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_preds = F.interpolate( mask_preds.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mlvl_mask_preds.append(mask_preds) mlvl_cls_preds.append(cls_pred) return mlvl_mask_preds, mlvl_cls_preds def loss_by_feat(self, mlvl_mask_preds: List[Tensor], mlvl_cls_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. Each element in the list has shape (batch_size, num_grids**2 ,h ,w). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" num_levels = self.num_levels num_imgs = len(batch_img_metas) featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds] # `BoolTensor` in `pos_masks` represent # whether the corresponding point is # positive pos_mask_targets, labels, pos_masks = multi_apply( self._get_targets_single, batch_gt_instances, featmap_sizes=featmap_sizes) # change from the outside list meaning multi images # to the outside list meaning multi levels mlvl_pos_mask_targets = [[] for _ in range(num_levels)] mlvl_pos_mask_preds = [[] for _ in range(num_levels)] mlvl_pos_masks = [[] for _ in range(num_levels)] mlvl_labels = [[] for _ in range(num_levels)] for img_id in range(num_imgs): assert num_levels == len(pos_mask_targets[img_id]) for lvl in range(num_levels): mlvl_pos_mask_targets[lvl].append( pos_mask_targets[img_id][lvl]) mlvl_pos_mask_preds[lvl].append( mlvl_mask_preds[lvl][img_id, pos_masks[img_id][lvl], ...]) mlvl_pos_masks[lvl].append(pos_masks[img_id][lvl].flatten()) mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) # cat multiple image temp_mlvl_cls_preds = [] for lvl in range(num_levels): mlvl_pos_mask_targets[lvl] = torch.cat( mlvl_pos_mask_targets[lvl], dim=0) mlvl_pos_mask_preds[lvl] = torch.cat( mlvl_pos_mask_preds[lvl], dim=0) mlvl_pos_masks[lvl] = torch.cat(mlvl_pos_masks[lvl], dim=0) mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) num_pos = sum(item.sum() for item in mlvl_pos_masks) # dice loss loss_mask = [] for pred, target in zip(mlvl_pos_mask_preds, mlvl_pos_mask_targets): if pred.size()[0] == 0: loss_mask.append(pred.sum().unsqueeze(0)) continue loss_mask.append( self.loss_mask(pred, target, reduction_override='none')) if num_pos > 0: loss_mask = torch.cat(loss_mask).sum() / num_pos else: loss_mask = torch.cat(loss_mask).mean() flatten_labels = torch.cat(mlvl_labels) flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) loss_cls = self.loss_cls( flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) return dict(loss_mask=loss_mask, loss_cls=loss_cls) def _get_targets_single(self, gt_instances: InstanceData, featmap_sizes: Optional[list] = None) -> tuple: """Compute targets for predictions of single image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes``, ``labels``, and ``masks`` attributes. featmap_sizes (list[:obj:`torch.size`]): Size of each feature map from feature pyramid, each element means (feat_h, feat_w). Defaults to None. Returns: Tuple: Usually returns a tuple containing targets for predictions. - mlvl_pos_mask_targets (list[Tensor]): Each element represent the binary mask targets for positive points in this level, has shape (num_pos, out_h, out_w). - mlvl_labels (list[Tensor]): Each element is classification labels for all points in this level, has shape (num_grid, num_grid). - mlvl_pos_masks (list[Tensor]): Each element is a `BoolTensor` to represent whether the corresponding point in single level is positive, has shape (num_grid **2). 
""" gt_labels = gt_instances.labels device = gt_labels.device gt_bboxes = gt_instances.bboxes gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) gt_masks = gt_instances.masks.to_tensor( dtype=torch.bool, device=device) mlvl_pos_mask_targets = [] mlvl_labels = [] mlvl_pos_masks = [] for (lower_bound, upper_bound), stride, featmap_size, num_grid \ in zip(self.scale_ranges, self.strides, featmap_sizes, self.num_grids): mask_target = torch.zeros( [num_grid**2, featmap_size[0], featmap_size[1]], dtype=torch.uint8, device=device) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes labels = torch.zeros([num_grid, num_grid], dtype=torch.int64, device=device) + self.num_classes pos_mask = torch.zeros([num_grid**2], dtype=torch.bool, device=device) gt_inds = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(gt_inds) == 0: mlvl_pos_mask_targets.append( mask_target.new_zeros(0, featmap_size[0], featmap_size[1])) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) continue hit_gt_bboxes = gt_bboxes[gt_inds] hit_gt_labels = gt_labels[gt_inds] hit_gt_masks = gt_masks[gt_inds, ...] pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] - hit_gt_bboxes[:, 0]) * self.pos_scale pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] - hit_gt_bboxes[:, 1]) * self.pos_scale # Make sure hit_gt_masks has a value valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0 output_stride = stride / 2 for gt_mask, gt_label, pos_h_range, pos_w_range, \ valid_mask_flag in \ zip(hit_gt_masks, hit_gt_labels, pos_h_ranges, pos_w_ranges, valid_mask_flags): if not valid_mask_flag: continue upsampled_size = (featmap_sizes[0][0] * 4, featmap_sizes[0][1] * 4) center_h, center_w = center_of_mass(gt_mask) coord_w = int( floordiv((center_w / upsampled_size[1]), (1. / num_grid), rounding_mode='trunc')) coord_h = int( floordiv((center_h / upsampled_size[0]), (1. / num_grid), rounding_mode='trunc')) # left, top, right, down top_box = max( 0, int( floordiv( (center_h - pos_h_range) / upsampled_size[0], (1. / num_grid), rounding_mode='trunc'))) down_box = min( num_grid - 1, int( floordiv( (center_h + pos_h_range) / upsampled_size[0], (1. / num_grid), rounding_mode='trunc'))) left_box = max( 0, int( floordiv( (center_w - pos_w_range) / upsampled_size[1], (1. / num_grid), rounding_mode='trunc'))) right_box = min( num_grid - 1, int( floordiv( (center_w + pos_w_range) / upsampled_size[1], (1. / num_grid), rounding_mode='trunc'))) top = max(top_box, coord_h - 1) down = min(down_box, coord_h + 1) left = max(coord_w - 1, left_box) right = min(right_box, coord_w + 1) labels[top:(down + 1), left:(right + 1)] = gt_label # ins gt_mask = np.uint8(gt_mask.cpu().numpy()) # Follow the original implementation, F.interpolate is # different from cv2 and opencv gt_mask = mmcv.imrescale(gt_mask, scale=1. / output_stride) gt_mask = torch.from_numpy(gt_mask).to(device=device) for i in range(top, down + 1): for j in range(left, right + 1): index = int(i * num_grid + j) mask_target[index, :gt_mask.shape[0], :gt_mask. shape[1]] = gt_mask pos_mask[index] = True mlvl_pos_mask_targets.append(mask_target[pos_mask]) mlvl_labels.append(labels) mlvl_pos_masks.append(pos_mask) return mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks def predict_by_feat(self, mlvl_mask_preds: List[Tensor], mlvl_cls_scores: List[Tensor], batch_img_metas: List[dict], **kwargs) -> InstanceList: """Transform a batch of output features extracted from the head into mask results. 
Args: mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. Each element in the list has shape (batch_size, num_grids**2 ,h ,w). mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). batch_img_metas (list[dict]): Meta information of all images. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ mlvl_cls_scores = [ item.permute(0, 2, 3, 1) for item in mlvl_cls_scores ] assert len(mlvl_mask_preds) == len(mlvl_cls_scores) num_levels = len(mlvl_cls_scores) results_list = [] for img_id in range(len(batch_img_metas)): cls_pred_list = [ mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels) for lvl in range(num_levels) ] mask_pred_list = [ mlvl_mask_preds[lvl][img_id] for lvl in range(num_levels) ] cls_pred_list = torch.cat(cls_pred_list, dim=0) mask_pred_list = torch.cat(mask_pred_list, dim=0) img_meta = batch_img_metas[img_id] results = self._predict_by_feat_single( cls_pred_list, mask_pred_list, img_meta=img_meta) results_list.append(results) return results_list def _predict_by_feat_single(self, cls_scores: Tensor, mask_preds: Tensor, img_meta: dict, cfg: OptConfigType = None) -> InstanceData: """Transform a single image's features extracted from the head into mask results. Args: cls_scores (Tensor): Classification score of all points in single image, has shape (num_points, num_classes). mask_preds (Tensor): Mask prediction of all points in single image, has shape (num_points, feat_h, feat_w). img_meta (dict): Meta information of corresponding image. cfg (dict, optional): Config used in test phase. Defaults to None. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). 
""" def empty_results(cls_scores, ori_shape): """Generate a empty results.""" results = InstanceData() results.scores = cls_scores.new_ones(0) results.masks = cls_scores.new_zeros(0, *ori_shape) results.labels = cls_scores.new_ones(0) results.bboxes = cls_scores.new_zeros(0, 4) return results cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(mask_preds) featmap_size = mask_preds.size()[-2:] h, w = img_meta['img_shape'][:2] upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) score_mask = (cls_scores > cfg.score_thr) cls_scores = cls_scores[score_mask] if len(cls_scores) == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) inds = score_mask.nonzero() cls_labels = inds[:, 1] # Filter the mask mask with an area is smaller than # stride of corresponding feature level lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0) strides = cls_scores.new_ones(lvl_interval[-1]) strides[:lvl_interval[0]] *= self.strides[0] for lvl in range(1, self.num_levels): strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= self.strides[lvl] strides = strides[inds[:, 0]] mask_preds = mask_preds[inds[:, 0]] masks = mask_preds > cfg.mask_thr sum_masks = masks.sum((1, 2)).float() keep = sum_masks > strides if keep.sum() == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) masks = masks[keep] mask_preds = mask_preds[keep] sum_masks = sum_masks[keep] cls_scores = cls_scores[keep] cls_labels = cls_labels[keep] # maskness. mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks cls_scores *= mask_scores scores, labels, _, keep_inds = mask_matrix_nms( masks, cls_labels, cls_scores, mask_area=sum_masks, nms_pre=cfg.nms_pre, max_num=cfg.max_per_img, kernel=cfg.kernel, sigma=cfg.sigma, filter_thr=cfg.filter_thr) # mask_matrix_nms may return an empty Tensor if len(keep_inds) == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) mask_preds = mask_preds[keep_inds] mask_preds = F.interpolate( mask_preds.unsqueeze(0), size=upsampled_size, mode='bilinear')[:, :, :h, :w] mask_preds = F.interpolate( mask_preds, size=img_meta['ori_shape'][:2], mode='bilinear').squeeze(0) masks = mask_preds > cfg.mask_thr results = InstanceData() results.masks = masks results.labels = labels results.scores = scores # create an empty bbox in InstanceData to avoid bugs when # calculating metrics. results.bboxes = results.scores.new_zeros(len(scores), 4) return results @MODELS.register_module() class DecoupledSOLOHead(SOLOHead): """Decoupled SOLO mask head used in `SOLO: Segmenting Objects by Locations. <https://arxiv.org/abs/1912.04488>`_ Args: init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, *args, init_cfg: MultiConfig = [ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_x')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_y')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], **kwargs) -> None: super().__init__(*args, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: self.mask_convs_x = nn.ModuleList() self.mask_convs_y = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels + 1 if i == 0 else self.feat_channels self.mask_convs_x.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.mask_convs_y.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, norm_cfg=self.norm_cfg)) self.conv_mask_list_x = nn.ModuleList() self.conv_mask_list_y = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list_x.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_mask_list_y.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, x: Tuple[Tensor]) -> Tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and mask prediction. - mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). - mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). 
""" assert len(x) == self.num_levels feats = self.resize_feats(x) mask_preds_x = [] mask_preds_y = [] cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat_x = torch.cat([mask_feat, coord_feat[:, 0:1, ...]], 1) mask_feat_y = torch.cat([mask_feat, coord_feat[:, 1:2, ...]], 1) for mask_layer_x, mask_layer_y in \ zip(self.mask_convs_x, self.mask_convs_y): mask_feat_x = mask_layer_x(mask_feat_x) mask_feat_y = mask_layer_y(mask_feat_y) mask_feat_x = F.interpolate( mask_feat_x, scale_factor=2, mode='bilinear') mask_feat_y = F.interpolate( mask_feat_y, scale_factor=2, mode='bilinear') mask_pred_x = self.conv_mask_list_x[i](mask_feat_x) mask_pred_y = self.conv_mask_list_y[i](mask_feat_y) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_pred_x = F.interpolate( mask_pred_x.sigmoid(), size=upsampled_size, mode='bilinear') mask_pred_y = F.interpolate( mask_pred_y.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mask_preds_x.append(mask_pred_x) mask_preds_y.append(mask_pred_y) cls_preds.append(cls_pred) return mask_preds_x, mask_preds_y, cls_preds def loss_by_feat(self, mlvl_mask_preds_x: List[Tensor], mlvl_mask_preds_y: List[Tensor], mlvl_cls_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" num_levels = self.num_levels num_imgs = len(batch_img_metas) featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds_x] pos_mask_targets, labels, xy_pos_indexes = multi_apply( self._get_targets_single, batch_gt_instances, featmap_sizes=featmap_sizes) # change from the outside list meaning multi images # to the outside list meaning multi levels mlvl_pos_mask_targets = [[] for _ in range(num_levels)] mlvl_pos_mask_preds_x = [[] for _ in range(num_levels)] mlvl_pos_mask_preds_y = [[] for _ in range(num_levels)] mlvl_labels = [[] for _ in range(num_levels)] for img_id in range(num_imgs): for lvl in range(num_levels): mlvl_pos_mask_targets[lvl].append( pos_mask_targets[img_id][lvl]) mlvl_pos_mask_preds_x[lvl].append( mlvl_mask_preds_x[lvl][img_id, xy_pos_indexes[img_id][lvl][:, 1]]) mlvl_pos_mask_preds_y[lvl].append( mlvl_mask_preds_y[lvl][img_id, xy_pos_indexes[img_id][lvl][:, 0]]) mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) # cat multiple image temp_mlvl_cls_preds = [] for lvl in range(num_levels): mlvl_pos_mask_targets[lvl] = torch.cat( mlvl_pos_mask_targets[lvl], dim=0) mlvl_pos_mask_preds_x[lvl] = torch.cat( mlvl_pos_mask_preds_x[lvl], dim=0) mlvl_pos_mask_preds_y[lvl] = torch.cat( mlvl_pos_mask_preds_y[lvl], dim=0) mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) num_pos = 0. # dice loss loss_mask = [] for pred_x, pred_y, target in \ zip(mlvl_pos_mask_preds_x, mlvl_pos_mask_preds_y, mlvl_pos_mask_targets): num_masks = pred_x.size(0) if num_masks == 0: # make sure can get grad loss_mask.append((pred_x.sum() + pred_y.sum()).unsqueeze(0)) continue num_pos += num_masks pred_mask = pred_y.sigmoid() * pred_x.sigmoid() loss_mask.append( self.loss_mask(pred_mask, target, reduction_override='none')) if num_pos > 0: loss_mask = torch.cat(loss_mask).sum() / num_pos else: loss_mask = torch.cat(loss_mask).mean() # cate flatten_labels = torch.cat(mlvl_labels) flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) loss_cls = self.loss_cls( flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) return dict(loss_mask=loss_mask, loss_cls=loss_cls) def _get_targets_single(self, gt_instances: InstanceData, featmap_sizes: Optional[list] = None) -> tuple: """Compute targets for predictions of single image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes``, ``labels``, and ``masks`` attributes. featmap_sizes (list[:obj:`torch.size`]): Size of each feature map from feature pyramid, each element means (feat_h, feat_w). Defaults to None. Returns: Tuple: Usually returns a tuple containing targets for predictions. - mlvl_pos_mask_targets (list[Tensor]): Each element represent the binary mask targets for positive points in this level, has shape (num_pos, out_h, out_w). - mlvl_labels (list[Tensor]): Each element is classification labels for all points in this level, has shape (num_grid, num_grid). - mlvl_xy_pos_indexes (list[Tensor]): Each element in the list contains the index of positive samples in corresponding level, has shape (num_pos, 2), last dimension 2 present (index_x, index_y). 
""" mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks = \ super()._get_targets_single(gt_instances, featmap_sizes=featmap_sizes) mlvl_xy_pos_indexes = [(item - self.num_classes).nonzero() for item in mlvl_labels] return mlvl_pos_mask_targets, mlvl_labels, mlvl_xy_pos_indexes def predict_by_feat(self, mlvl_mask_preds_x: List[Tensor], mlvl_mask_preds_y: List[Tensor], mlvl_cls_scores: List[Tensor], batch_img_metas: List[dict], **kwargs) -> InstanceList: """Transform a batch of output features extracted from the head into mask results. Args: mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes ,num_grids ,num_grids). batch_img_metas (list[dict]): Meta information of all images. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ mlvl_cls_scores = [ item.permute(0, 2, 3, 1) for item in mlvl_cls_scores ] assert len(mlvl_mask_preds_x) == len(mlvl_cls_scores) num_levels = len(mlvl_cls_scores) results_list = [] for img_id in range(len(batch_img_metas)): cls_pred_list = [ mlvl_cls_scores[i][img_id].view( -1, self.cls_out_channels).detach() for i in range(num_levels) ] mask_pred_list_x = [ mlvl_mask_preds_x[i][img_id] for i in range(num_levels) ] mask_pred_list_y = [ mlvl_mask_preds_y[i][img_id] for i in range(num_levels) ] cls_pred_list = torch.cat(cls_pred_list, dim=0) mask_pred_list_x = torch.cat(mask_pred_list_x, dim=0) mask_pred_list_y = torch.cat(mask_pred_list_y, dim=0) img_meta = batch_img_metas[img_id] results = self._predict_by_feat_single( cls_pred_list, mask_pred_list_x, mask_pred_list_y, img_meta=img_meta) results_list.append(results) return results_list def _predict_by_feat_single(self, cls_scores: Tensor, mask_preds_x: Tensor, mask_preds_y: Tensor, img_meta: dict, cfg: OptConfigType = None) -> InstanceData: """Transform a single image's features extracted from the head into mask results. Args: cls_scores (Tensor): Classification score of all points in single image, has shape (num_points, num_classes). mask_preds_x (Tensor): Mask prediction of x branch of all points in single image, has shape (sum_num_grids, feat_h, feat_w). mask_preds_y (Tensor): Mask prediction of y branch of all points in single image, has shape (sum_num_grids, feat_h, feat_w). img_meta (dict): Meta information of corresponding image. cfg (dict): Config used in test phase. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). 
""" def empty_results(cls_scores, ori_shape): """Generate a empty results.""" results = InstanceData() results.scores = cls_scores.new_ones(0) results.masks = cls_scores.new_zeros(0, *ori_shape) results.labels = cls_scores.new_ones(0) results.bboxes = cls_scores.new_zeros(0, 4) return results cfg = self.test_cfg if cfg is None else cfg featmap_size = mask_preds_x.size()[-2:] h, w = img_meta['img_shape'][:2] upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) score_mask = (cls_scores > cfg.score_thr) cls_scores = cls_scores[score_mask] inds = score_mask.nonzero() lvl_interval = inds.new_tensor(self.num_grids).pow(2).cumsum(0) num_all_points = lvl_interval[-1] lvl_start_index = inds.new_ones(num_all_points) num_grids = inds.new_ones(num_all_points) seg_size = inds.new_tensor(self.num_grids).cumsum(0) mask_lvl_start_index = inds.new_ones(num_all_points) strides = inds.new_ones(num_all_points) lvl_start_index[:lvl_interval[0]] *= 0 mask_lvl_start_index[:lvl_interval[0]] *= 0 num_grids[:lvl_interval[0]] *= self.num_grids[0] strides[:lvl_interval[0]] *= self.strides[0] for lvl in range(1, self.num_levels): lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ lvl_interval[lvl - 1] mask_lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ seg_size[lvl - 1] num_grids[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ self.num_grids[lvl] strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ self.strides[lvl] lvl_start_index = lvl_start_index[inds[:, 0]] mask_lvl_start_index = mask_lvl_start_index[inds[:, 0]] num_grids = num_grids[inds[:, 0]] strides = strides[inds[:, 0]] y_lvl_offset = (inds[:, 0] - lvl_start_index) // num_grids x_lvl_offset = (inds[:, 0] - lvl_start_index) % num_grids y_inds = mask_lvl_start_index + y_lvl_offset x_inds = mask_lvl_start_index + x_lvl_offset cls_labels = inds[:, 1] mask_preds = mask_preds_x[x_inds, ...] * mask_preds_y[y_inds, ...] masks = mask_preds > cfg.mask_thr sum_masks = masks.sum((1, 2)).float() keep = sum_masks > strides if keep.sum() == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) masks = masks[keep] mask_preds = mask_preds[keep] sum_masks = sum_masks[keep] cls_scores = cls_scores[keep] cls_labels = cls_labels[keep] # maskness. mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks cls_scores *= mask_scores scores, labels, _, keep_inds = mask_matrix_nms( masks, cls_labels, cls_scores, mask_area=sum_masks, nms_pre=cfg.nms_pre, max_num=cfg.max_per_img, kernel=cfg.kernel, sigma=cfg.sigma, filter_thr=cfg.filter_thr) # mask_matrix_nms may return an empty Tensor if len(keep_inds) == 0: return empty_results(cls_scores, img_meta['ori_shape'][:2]) mask_preds = mask_preds[keep_inds] mask_preds = F.interpolate( mask_preds.unsqueeze(0), size=upsampled_size, mode='bilinear')[:, :, :h, :w] mask_preds = F.interpolate( mask_preds, size=img_meta['ori_shape'][:2], mode='bilinear').squeeze(0) masks = mask_preds > cfg.mask_thr results = InstanceData() results.masks = masks results.labels = labels results.scores = scores # create an empty bbox in InstanceData to avoid bugs when # calculating metrics. results.bboxes = results.scores.new_zeros(len(scores), 4) return results @MODELS.register_module() class DecoupledSOLOLightHead(DecoupledSOLOHead): """Decoupled Light SOLO mask head used in `SOLO: Segmenting Objects by Locations <https://arxiv.org/abs/1912.04488>`_ Args: with_dcn (bool): Whether use dcn in mask_convs and cls_convs, Defaults to False. init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, *args, dcn_cfg: OptConfigType = None, init_cfg: MultiConfig = [ dict(type='Normal', layer='Conv2d', std=0.01), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_x')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_mask_list_y')), dict( type='Normal', std=0.01, bias_prob=0.01, override=dict(name='conv_cls')) ], **kwargs) -> None: assert dcn_cfg is None or isinstance(dcn_cfg, dict) self.dcn_cfg = dcn_cfg super().__init__(*args, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: self.mask_convs = nn.ModuleList() self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): if self.dcn_cfg is not None \ and i == self.stacked_convs - 1: conv_cfg = self.dcn_cfg else: conv_cfg = None chn = self.in_channels + 2 if i == 0 else self.feat_channels self.mask_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg)) self.conv_mask_list_x = nn.ModuleList() self.conv_mask_list_y = nn.ModuleList() for num_grid in self.num_grids: self.conv_mask_list_x.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_mask_list_y.append( nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) def forward(self, x: Tuple[Tensor]) -> Tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and mask prediction. - mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction from x branch. Each element in the list has shape (batch_size, num_grids ,h ,w). - mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction from y branch. Each element in the list has shape (batch_size, num_grids ,h ,w). - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids ,num_grids). 
""" assert len(x) == self.num_levels feats = self.resize_feats(x) mask_preds_x = [] mask_preds_y = [] cls_preds = [] for i in range(self.num_levels): x = feats[i] mask_feat = x cls_feat = x # generate and concat the coordinate coord_feat = generate_coordinate(mask_feat.size(), mask_feat.device) mask_feat = torch.cat([mask_feat, coord_feat], 1) for mask_layer in self.mask_convs: mask_feat = mask_layer(mask_feat) mask_feat = F.interpolate( mask_feat, scale_factor=2, mode='bilinear') mask_pred_x = self.conv_mask_list_x[i](mask_feat) mask_pred_y = self.conv_mask_list_y[i](mask_feat) # cls branch for j, cls_layer in enumerate(self.cls_convs): if j == self.cls_down_index: num_grid = self.num_grids[i] cls_feat = F.interpolate( cls_feat, size=num_grid, mode='bilinear') cls_feat = cls_layer(cls_feat) cls_pred = self.conv_cls(cls_feat) if not self.training: feat_wh = feats[0].size()[-2:] upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) mask_pred_x = F.interpolate( mask_pred_x.sigmoid(), size=upsampled_size, mode='bilinear') mask_pred_y = F.interpolate( mask_pred_y.sigmoid(), size=upsampled_size, mode='bilinear') cls_pred = cls_pred.sigmoid() # get local maximum local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) keep_mask = local_max[:, :, :-1, :-1] == cls_pred cls_pred = cls_pred * keep_mask mask_preds_x.append(mask_pred_x) mask_preds_y.append(mask_pred_y) cls_preds.append(cls_pred) return mask_preds_x, mask_preds_y, cls_preds
52,112
40.228639
79
py
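# --- Illustrative sketch (not part of the repo) --------------------------------
# Toy-sized walk-through of the SOLO post-processing used in the
# `_predict_by_feat_single` methods above: binarize the mask logits, discard
# masks whose area does not exceed the stride of their FPN level, then rescale
# each class score by the mean confidence inside its mask ("maskness") before
# mask_matrix_nms. All values below are made up for illustration.
import torch

mask_thr = 0.5
mask_preds = torch.rand(5, 16, 16)                    # (num_candidates, feat_h, feat_w)
cls_scores = torch.rand(5)                            # one score per candidate
strides = torch.tensor([8., 8., 16., 32., 32.])       # FPN stride of each candidate

masks = mask_preds > mask_thr
sum_masks = masks.sum((1, 2)).float()                 # mask area in pixels
keep = sum_masks > strides                            # drop masks smaller than the stride
masks, mask_preds = masks[keep], mask_preds[keep]
sum_masks, cls_scores = sum_masks[keep], cls_scores[keep]

mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks   # maskness
cls_scores = cls_scores * mask_scores                 # scores passed to mask_matrix_nms
# -------------------------------------------------------------------------------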
ERD
ERD-main/mmdet/models/dense_heads/embedding_rpn_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List import torch import torch.nn as nn from mmengine.model import BaseModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_cxcywh_to_xyxy from mmdet.structures.det_data_sample import SampleList from mmdet.utils import InstanceList, OptConfigType @MODELS.register_module() class EmbeddingRPNHead(BaseModule): """RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ . Unlike traditional RPNHead, this module does not need FPN input, but just decode `init_proposal_bboxes` and expand the first dimension of `init_proposal_bboxes` and `init_proposal_features` to the batch_size. Args: num_proposals (int): Number of init_proposals. Defaults to 100. proposal_feature_channel (int): Channel number of init_proposal_feature. Defaults to 256. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict]): Initialization config dict. Defaults to None. """ def __init__(self, num_proposals: int = 100, proposal_feature_channel: int = 256, init_cfg: OptConfigType = None, **kwargs) -> None: # `**kwargs` is necessary to avoid some potential error. assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super().__init__(init_cfg=init_cfg) self.num_proposals = num_proposals self.proposal_feature_channel = proposal_feature_channel self._init_layers() def _init_layers(self) -> None: """Initialize a sparse set of proposal boxes and proposal features.""" self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4) self.init_proposal_features = nn.Embedding( self.num_proposals, self.proposal_feature_channel) def init_weights(self) -> None: """Initialize the init_proposal_bboxes as normalized. [c_x, c_y, w, h], and we initialize it to the size of the entire image. """ super().init_weights() nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5) nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1) def _decode_init_proposals(self, x: List[Tensor], batch_data_samples: SampleList) -> InstanceList: """Decode init_proposal_bboxes according to the size of images and expand dimension of init_proposal_features to batch_size. Args: x (list[Tensor]): List of FPN features. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: List[:obj:`InstanceData`:] Detection results of each image. Each item usually contains following keys. - proposals: Decoded proposal bboxes, has shape (num_proposals, 4). - features: init_proposal_features, expanded proposal features, has shape (num_proposals, proposal_feature_channel). - imgs_whwh: Tensor with shape (num_proposals, 4), the dimension means [img_width, img_height, img_width, img_height]. 
""" batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) proposals = self.init_proposal_bboxes.weight.clone() proposals = bbox_cxcywh_to_xyxy(proposals) imgs_whwh = [] for meta in batch_img_metas: h, w = meta['img_shape'][:2] imgs_whwh.append(x[0].new_tensor([[w, h, w, h]])) imgs_whwh = torch.cat(imgs_whwh, dim=0) imgs_whwh = imgs_whwh[:, None, :] proposals = proposals * imgs_whwh rpn_results_list = [] for idx in range(len(batch_img_metas)): rpn_results = InstanceData() rpn_results.bboxes = proposals[idx] rpn_results.imgs_whwh = imgs_whwh[idx].repeat( self.num_proposals, 1) rpn_results.features = self.init_proposal_features.weight.clone() rpn_results_list.append(rpn_results) return rpn_results_list def loss(self, *args, **kwargs): """Perform forward propagation and loss calculation of the detection head on the features of the upstream network.""" raise NotImplementedError( 'EmbeddingRPNHead does not have `loss`, please use ' '`predict` or `loss_and_predict` instead.') def predict(self, x: List[Tensor], batch_data_samples: SampleList, **kwargs) -> InstanceList: """Perform forward propagation of the detection head and predict detection results on the features of the upstream network.""" # `**kwargs` is necessary to avoid some potential error. return self._decode_init_proposals( x=x, batch_data_samples=batch_data_samples) def loss_and_predict(self, x: List[Tensor], batch_data_samples: SampleList, **kwargs) -> tuple: """Perform forward propagation of the head, then calculate loss and predictions from the features and data samples.""" # `**kwargs` is necessary to avoid some potential error. predictions = self._decode_init_proposals( x=x, batch_data_samples=batch_data_samples) return dict(), predictions
5,695
41.827068
79
py
ERD
ERD-main/mmdet/models/dense_heads/autoassign_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Sequence, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Scale from mmengine.model import bias_init_with_prob, normal_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import InstanceList, OptInstanceList, reduce_mean from ..task_modules.prior_generators import MlvlPointGenerator from ..utils import levels_to_images, multi_apply from .fcos_head import FCOSHead EPS = 1e-12 class CenterPrior(nn.Module): """Center Weighting module to adjust the category-specific prior distributions. Args: force_topk (bool): When no point falls into gt_bbox, forcibly select the k points closest to the center to calculate the center prior. Defaults to False. topk (int): The number of points used to calculate the center prior when no point falls in gt_bbox. Only work when force_topk if True. Defaults to 9. num_classes (int): The class number of dataset. Defaults to 80. strides (Sequence[int]): The stride of each input feature map. Defaults to (8, 16, 32, 64, 128). """ def __init__( self, force_topk: bool = False, topk: int = 9, num_classes: int = 80, strides: Sequence[int] = (8, 16, 32, 64, 128) ) -> None: super().__init__() self.mean = nn.Parameter(torch.zeros(num_classes, 2)) self.sigma = nn.Parameter(torch.ones(num_classes, 2)) self.strides = strides self.force_topk = force_topk self.topk = topk def forward(self, anchor_points_list: List[Tensor], gt_instances: InstanceData, inside_gt_bbox_mask: Tensor) -> Tuple[Tensor, Tensor]: """Get the center prior of each point on the feature map for each instance. Args: anchor_points_list (list[Tensor]): list of coordinate of points on feature map. Each with shape (num_points, 2). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape of (num_points, num_gt), each value is used to mark whether this point falls within a certain gt. Returns: tuple[Tensor, Tensor]: - center_prior_weights(Tensor): Float tensor with shape of \ (num_points, num_gt). Each value represents the center \ weighting coefficient. - inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape \ of (num_points, num_gt), each value is used to mark whether this \ point falls within a certain gt or is the topk nearest points for \ a specific gt_bbox. 
""" gt_bboxes = gt_instances.bboxes labels = gt_instances.labels inside_gt_bbox_mask = inside_gt_bbox_mask.clone() num_gts = len(labels) num_points = sum([len(item) for item in anchor_points_list]) if num_gts == 0: return gt_bboxes.new_zeros(num_points, num_gts), inside_gt_bbox_mask center_prior_list = [] for slvl_points, stride in zip(anchor_points_list, self.strides): # slvl_points: points from single level in FPN, has shape (h*w, 2) # single_level_points has shape (h*w, num_gt, 2) single_level_points = slvl_points[:, None, :].expand( (slvl_points.size(0), len(gt_bboxes), 2)) gt_center_x = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2) gt_center_y = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2) gt_center = torch.stack((gt_center_x, gt_center_y), dim=1) gt_center = gt_center[None] # instance_center has shape (1, num_gt, 2) instance_center = self.mean[labels][None] # instance_sigma has shape (1, num_gt, 2) instance_sigma = self.sigma[labels][None] # distance has shape (num_points, num_gt, 2) distance = (((single_level_points - gt_center) / float(stride) - instance_center)**2) center_prior = torch.exp(-distance / (2 * instance_sigma**2)).prod(dim=-1) center_prior_list.append(center_prior) center_prior_weights = torch.cat(center_prior_list, dim=0) if self.force_topk: gt_inds_no_points_inside = torch.nonzero( inside_gt_bbox_mask.sum(0) == 0).reshape(-1) if gt_inds_no_points_inside.numel(): topk_center_index = \ center_prior_weights[:, gt_inds_no_points_inside].topk( self.topk, dim=0)[1] temp_mask = inside_gt_bbox_mask[:, gt_inds_no_points_inside] inside_gt_bbox_mask[:, gt_inds_no_points_inside] = \ torch.scatter(temp_mask, dim=0, index=topk_center_index, src=torch.ones_like( topk_center_index, dtype=torch.bool)) center_prior_weights[~inside_gt_bbox_mask] = 0 return center_prior_weights, inside_gt_bbox_mask @MODELS.register_module() class AutoAssignHead(FCOSHead): """AutoAssignHead head used in AutoAssign. More details can be found in the `paper <https://arxiv.org/abs/2007.03496>`_ . Args: force_topk (bool): Used in center prior initialization to handle extremely small gt. Default is False. topk (int): The number of points used to calculate the center prior when no point falls in gt_bbox. Only work when force_topk if True. Defaults to 9. pos_loss_weight (float): The loss weight of positive loss and with default value 0.25. neg_loss_weight (float): The loss weight of negative loss and with default value 0.75. center_loss_weight (float): The loss weight of center prior loss and with default value 0.75. """ def __init__(self, *args, force_topk: bool = False, topk: int = 9, pos_loss_weight: float = 0.25, neg_loss_weight: float = 0.75, center_loss_weight: float = 0.75, **kwargs) -> None: super().__init__(*args, conv_bias=True, **kwargs) self.center_prior = CenterPrior( force_topk=force_topk, topk=topk, num_classes=self.num_classes, strides=self.strides) self.pos_loss_weight = pos_loss_weight self.neg_loss_weight = neg_loss_weight self.center_loss_weight = center_loss_weight self.prior_generator = MlvlPointGenerator(self.strides, offset=0) def init_weights(self) -> None: """Initialize weights of the head. 
In particular, we have special initialization for classified conv's and regression conv's bias """ super(AutoAssignHead, self).init_weights() bias_cls = bias_init_with_prob(0.02) normal_init(self.conv_cls, std=0.01, bias=bias_cls) normal_init(self.conv_reg, std=0.01, bias=4.0) def forward_single(self, x: Tensor, scale: Scale, stride: int) -> Tuple[Tensor, Tensor, Tensor]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps, only used to normalize the bbox prediction when self.norm_on_bbox is True. Returns: tuple[Tensor, Tensor, Tensor]: scores for each class, bbox predictions and centerness predictions of input feature maps. """ cls_score, bbox_pred, cls_feat, reg_feat = super( FCOSHead, self).forward_single(x) centerness = self.conv_centerness(reg_feat) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() # bbox_pred needed for gradient computation has been modified # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace # F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) bbox_pred *= stride return cls_score, bbox_pred, centerness def get_pos_loss_single(self, cls_score: Tensor, objectness: Tensor, reg_loss: Tensor, gt_instances: InstanceData, center_prior_weights: Tensor) -> Tuple[Tensor]: """Calculate the positive loss of all points in gt_bboxes. Args: cls_score (Tensor): All category scores for each point on the feature map. The shape is (num_points, num_class). objectness (Tensor): Foreground probability of all points, has shape (num_points, 1). reg_loss (Tensor): The regression loss of each gt_bbox and each prediction box, has shape of (num_points, num_gt). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. center_prior_weights (Tensor): Float tensor with shape of (num_points, num_gt). Each value represents the center weighting coefficient. Returns: tuple[Tensor]: - pos_loss (Tensor): The positive loss of all points in the \ gt_bboxes. """ gt_labels = gt_instances.labels # p_loc: localization confidence p_loc = torch.exp(-reg_loss) # p_cls: classification confidence p_cls = (cls_score * objectness)[:, gt_labels] # p_pos: joint confidence indicator p_pos = p_cls * p_loc # 3 is a hyper-parameter to control the contributions of high and # low confidence locations towards positive losses. confidence_weight = torch.exp(p_pos * 3) p_pos_weight = (confidence_weight * center_prior_weights) / ( (confidence_weight * center_prior_weights).sum( 0, keepdim=True)).clamp(min=EPS) reweighted_p_pos = (p_pos * p_pos_weight).sum(0) pos_loss = F.binary_cross_entropy( reweighted_p_pos, torch.ones_like(reweighted_p_pos), reduction='none') pos_loss = pos_loss.sum() * self.pos_loss_weight return pos_loss, def get_neg_loss_single(self, cls_score: Tensor, objectness: Tensor, gt_instances: InstanceData, ious: Tensor, inside_gt_bbox_mask: Tensor) -> Tuple[Tensor]: """Calculate the negative loss of all points in feature map. Args: cls_score (Tensor): All category scores for each point on the feature map. The shape is (num_points, num_class). objectness (Tensor): Foreground probability of all points and is shape of (num_points, 1). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. 
It should includes ``bboxes`` and ``labels`` attributes. ious (Tensor): Float tensor with shape of (num_points, num_gt). Each value represent the iou of pred_bbox and gt_bboxes. inside_gt_bbox_mask (Tensor): Tensor of bool type, with shape of (num_points, num_gt), each value is used to mark whether this point falls within a certain gt. Returns: tuple[Tensor]: - neg_loss (Tensor): The negative loss of all points in the \ feature map. """ gt_labels = gt_instances.labels num_gts = len(gt_labels) joint_conf = (cls_score * objectness) p_neg_weight = torch.ones_like(joint_conf) if num_gts > 0: # the order of dinmension would affect the value of # p_neg_weight, we strictly follow the original # implementation. inside_gt_bbox_mask = inside_gt_bbox_mask.permute(1, 0) ious = ious.permute(1, 0) foreground_idxs = torch.nonzero(inside_gt_bbox_mask, as_tuple=True) temp_weight = (1 / (1 - ious[foreground_idxs]).clamp_(EPS)) def normalize(x): return (x - x.min() + EPS) / (x.max() - x.min() + EPS) for instance_idx in range(num_gts): idxs = foreground_idxs[0] == instance_idx if idxs.any(): temp_weight[idxs] = normalize(temp_weight[idxs]) p_neg_weight[foreground_idxs[1], gt_labels[foreground_idxs[0]]] = 1 - temp_weight logits = (joint_conf * p_neg_weight) neg_loss = ( logits**2 * F.binary_cross_entropy( logits, torch.zeros_like(logits), reduction='none')) neg_loss = neg_loss.sum() * self.neg_loss_weight return neg_loss, def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], objectnesses: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. objectnesses (list[Tensor]): objectness for each scale level, each is a 4D-tensor, the channel number is num_points * 1. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" assert len(cls_scores) == len(bbox_preds) == len(objectnesses) all_num_gt = sum([len(item) for item in batch_gt_instances]) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) inside_gt_bbox_mask_list, bbox_targets_list = self.get_targets( all_level_points, batch_gt_instances) center_prior_weight_list = [] temp_inside_gt_bbox_mask_list = [] for gt_instances, inside_gt_bbox_mask in zip(batch_gt_instances, inside_gt_bbox_mask_list): center_prior_weight, inside_gt_bbox_mask = \ self.center_prior(all_level_points, gt_instances, inside_gt_bbox_mask) center_prior_weight_list.append(center_prior_weight) temp_inside_gt_bbox_mask_list.append(inside_gt_bbox_mask) inside_gt_bbox_mask_list = temp_inside_gt_bbox_mask_list mlvl_points = torch.cat(all_level_points, dim=0) bbox_preds = levels_to_images(bbox_preds) cls_scores = levels_to_images(cls_scores) objectnesses = levels_to_images(objectnesses) reg_loss_list = [] ious_list = [] num_points = len(mlvl_points) for bbox_pred, encoded_targets, inside_gt_bbox_mask in zip( bbox_preds, bbox_targets_list, inside_gt_bbox_mask_list): temp_num_gt = encoded_targets.size(1) expand_mlvl_points = mlvl_points[:, None, :].expand( num_points, temp_num_gt, 2).reshape(-1, 2) encoded_targets = encoded_targets.reshape(-1, 4) expand_bbox_pred = bbox_pred[:, None, :].expand( num_points, temp_num_gt, 4).reshape(-1, 4) decoded_bbox_preds = self.bbox_coder.decode( expand_mlvl_points, expand_bbox_pred) decoded_target_preds = self.bbox_coder.decode( expand_mlvl_points, encoded_targets) with torch.no_grad(): ious = bbox_overlaps( decoded_bbox_preds, decoded_target_preds, is_aligned=True) ious = ious.reshape(num_points, temp_num_gt) if temp_num_gt: ious = ious.max( dim=-1, keepdim=True).values.repeat(1, temp_num_gt) else: ious = ious.new_zeros(num_points, temp_num_gt) ious[~inside_gt_bbox_mask] = 0 ious_list.append(ious) loss_bbox = self.loss_bbox( decoded_bbox_preds, decoded_target_preds, weight=None, reduction_override='none') reg_loss_list.append(loss_bbox.reshape(num_points, temp_num_gt)) cls_scores = [item.sigmoid() for item in cls_scores] objectnesses = [item.sigmoid() for item in objectnesses] pos_loss_list, = multi_apply(self.get_pos_loss_single, cls_scores, objectnesses, reg_loss_list, batch_gt_instances, center_prior_weight_list) pos_avg_factor = reduce_mean( bbox_pred.new_tensor(all_num_gt)).clamp_(min=1) pos_loss = sum(pos_loss_list) / pos_avg_factor neg_loss_list, = multi_apply(self.get_neg_loss_single, cls_scores, objectnesses, batch_gt_instances, ious_list, inside_gt_bbox_mask_list) neg_avg_factor = sum(item.data.sum() for item in center_prior_weight_list) neg_avg_factor = reduce_mean(neg_avg_factor).clamp_(min=1) neg_loss = sum(neg_loss_list) / neg_avg_factor center_loss = [] for i in range(len(batch_img_metas)): if inside_gt_bbox_mask_list[i].any(): center_loss.append( len(batch_gt_instances[i]) / center_prior_weight_list[i].sum().clamp_(min=EPS)) # when width or height of gt_bbox is smaller than stride of p3 else: center_loss.append(center_prior_weight_list[i].sum() * 0) center_loss = torch.stack(center_loss).mean() * self.center_loss_weight # avoid dead lock in DDP if all_num_gt == 0: pos_loss = bbox_preds[0].sum() * 0 dummy_center_prior_loss = self.center_prior.mean.sum( ) * 0 + self.center_prior.sigma.sum() * 0 center_loss = objectnesses[0].sum() * 0 + dummy_center_prior_loss loss = dict( loss_pos=pos_loss, 
loss_neg=neg_loss, loss_center=center_loss) return loss def get_targets( self, points: List[Tensor], batch_gt_instances: InstanceList ) -> Tuple[List[Tensor], List[Tensor]]: """Compute regression targets and each point inside or outside gt_bbox in multiple images. Args: points (list[Tensor]): Points of all fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple(list[Tensor], list[Tensor]): - inside_gt_bbox_mask_list (list[Tensor]): Each Tensor is with \ bool type and shape of (num_points, num_gt), each value is used \ to mark whether this point falls within a certain gt. - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ level. Each tensor has shape (num_points, num_gt, 4). """ concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl inside_gt_bbox_mask_list, bbox_targets_list = multi_apply( self._get_targets_single, batch_gt_instances, points=concat_points) return inside_gt_bbox_mask_list, bbox_targets_list def _get_targets_single(self, gt_instances: InstanceData, points: Tensor) -> Tuple[Tensor, Tensor]: """Compute regression targets and each point inside or outside gt_bbox for a single image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. points (Tensor): Points of all fpn level, has shape (num_points, 2). Returns: tuple[Tensor, Tensor]: Containing the following Tensors: - inside_gt_bbox_mask (Tensor): Bool tensor with shape \ (num_points, num_gt), each value is used to mark whether this \ point falls within a certain gt. - bbox_targets (Tensor): BBox targets of each points with each \ gt_bboxes, has shape (num_points, num_gt, 4). """ gt_bboxes = gt_instances.bboxes num_points = points.size(0) num_gts = gt_bboxes.size(0) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None] ys = ys[:, None] left = xs - gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) if num_gts: inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 else: inside_gt_bbox_mask = bbox_targets.new_zeros((num_points, num_gts), dtype=torch.bool) return inside_gt_bbox_mask, bbox_targets
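# --- Illustrative sketch (not part of autoassign_head.py) ----------------------
# The CenterPrior module above weights each point by a 2D Gaussian of its
# stride-normalized offset from the gt center, with learnable per-class mean and
# sigma. Here mean/sigma are fixed to their initial values (0 and 1) and the
# coordinates are toy numbers, just to show the shape of the computation.
import torch

points = torch.tensor([[32., 32.], [64., 64.], [200., 200.]])  # (num_points, 2)
gt_center = torch.tensor([[64., 64.]])                          # (1, 2): one gt box center
stride = 8.0
mean = torch.zeros(1, 2)    # plays the role of self.mean[labels]
sigma = torch.ones(1, 2)    # plays the role of self.sigma[labels]

distance = ((points - gt_center) / stride - mean) ** 2          # (num_points, 2)
center_prior = torch.exp(-distance / (2 * sigma ** 2)).prod(dim=-1)
# center_prior[1] == 1.0 for the point sitting on the gt center; weights decay
# quickly for points far from it, which is what concentrates the positive loss.
# -------------------------------------------------------------------------------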
23,317
43.415238
79
py
ERD
ERD-main/mmdet/models/dense_heads/dino_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Tuple import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh from mmdet.utils import InstanceList, OptInstanceList, reduce_mean from ..utils import multi_apply from .deformable_detr_head import DeformableDETRHead @MODELS.register_module() class DINOHead(DeformableDETRHead): r"""Head of the DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection Code is modified from the `official github repo <https://github.com/IDEA-Research/DINO>`_. More details can be found in the `paper <https://arxiv.org/abs/2203.03605>`_ . """ def loss(self, hidden_states: Tensor, references: List[Tensor], enc_outputs_class: Tensor, enc_outputs_coord: Tensor, batch_data_samples: SampleList, dn_meta: Dict[str, int]) -> dict: """Perform forward propagation and loss calculation of the detection head on the queries of the upstream network. Args: hidden_states (Tensor): Hidden states output from each decoder layer, has shape (num_decoder_layers, bs, num_queries_total, dim), where `num_queries_total` is the sum of `num_denoising_queries` and `num_matching_queries` when `self.training` is `True`, else `num_matching_queries`. references (list[Tensor]): List of the reference from the decoder. The first reference is the `init_reference` (initial) and the other num_decoder_layers(6) references are `inter_references` (intermediate). The `init_reference` has shape (bs, num_queries_total, 4) and each `inter_reference` has shape (bs, num_queries, 4) with the last dimension arranged as (cx, cy, w, h). enc_outputs_class (Tensor): The score of each point on encode feature map, has shape (bs, num_feat_points, cls_out_channels). enc_outputs_coord (Tensor): The proposal generate from the encode feature map, has shape (bs, num_feat_points, 4) with the last dimension arranged as (cx, cy, w, h). batch_data_samples (list[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. It will be used for split outputs of denoising and matching parts and loss calculation. Returns: dict: A dictionary of loss components. """ batch_gt_instances = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) outs = self(hidden_states, references) loss_inputs = outs + (enc_outputs_class, enc_outputs_coord, batch_gt_instances, batch_img_metas, dn_meta) losses = self.loss_by_feat(*loss_inputs) return losses def loss_by_feat( self, all_layers_cls_scores: Tensor, all_layers_bbox_preds: Tensor, enc_cls_scores: Tensor, enc_bbox_preds: Tensor, batch_gt_instances: InstanceList, batch_img_metas: List[dict], dn_meta: Dict[str, int], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Loss function. Args: all_layers_cls_scores (Tensor): Classification scores of all decoder layers, has shape (num_decoder_layers, bs, num_queries_total, cls_out_channels), where `num_queries_total` is the sum of `num_denoising_queries` and `num_matching_queries`. all_layers_bbox_preds (Tensor): Regression outputs of all decoder layers. 
Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (num_decoder_layers, bs, num_queries_total, 4). enc_cls_scores (Tensor): The score of each point on encode feature map, has shape (bs, num_feat_points, cls_out_channels). enc_bbox_preds (Tensor): The proposal generate from the encode feature map, has shape (bs, num_feat_points, 4) with the last dimension arranged as (cx, cy, w, h). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. It will be used for split outputs of denoising and matching parts and loss calculation. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ # extract denoising and matching part of outputs (all_layers_matching_cls_scores, all_layers_matching_bbox_preds, all_layers_denoising_cls_scores, all_layers_denoising_bbox_preds) = \ self.split_outputs( all_layers_cls_scores, all_layers_bbox_preds, dn_meta) loss_dict = super(DeformableDETRHead, self).loss_by_feat( all_layers_matching_cls_scores, all_layers_matching_bbox_preds, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) # NOTE DETRHead.loss_by_feat but not DeformableDETRHead.loss_by_feat # is called, because the encoder loss calculations are different # between DINO and DeformableDETR. # loss of proposal generated from encode feature map. if enc_cls_scores is not None: # NOTE The enc_loss calculation of the DINO is # different from that of Deformable DETR. enc_loss_cls, enc_losses_bbox, enc_losses_iou = \ self.loss_by_feat_single( enc_cls_scores, enc_bbox_preds, batch_gt_instances=batch_gt_instances, batch_img_metas=batch_img_metas) loss_dict['enc_loss_cls'] = enc_loss_cls loss_dict['enc_loss_bbox'] = enc_losses_bbox loss_dict['enc_loss_iou'] = enc_losses_iou if all_layers_denoising_cls_scores is not None: # calculate denoising loss from all decoder layers dn_losses_cls, dn_losses_bbox, dn_losses_iou = self.loss_dn( all_layers_denoising_cls_scores, all_layers_denoising_bbox_preds, batch_gt_instances=batch_gt_instances, batch_img_metas=batch_img_metas, dn_meta=dn_meta) # collate denoising loss loss_dict['dn_loss_cls'] = dn_losses_cls[-1] loss_dict['dn_loss_bbox'] = dn_losses_bbox[-1] loss_dict['dn_loss_iou'] = dn_losses_iou[-1] for num_dec_layer, (loss_cls_i, loss_bbox_i, loss_iou_i) in \ enumerate(zip(dn_losses_cls[:-1], dn_losses_bbox[:-1], dn_losses_iou[:-1])): loss_dict[f'd{num_dec_layer}.dn_loss_cls'] = loss_cls_i loss_dict[f'd{num_dec_layer}.dn_loss_bbox'] = loss_bbox_i loss_dict[f'd{num_dec_layer}.dn_loss_iou'] = loss_iou_i return loss_dict def loss_dn(self, all_layers_denoising_cls_scores: Tensor, all_layers_denoising_bbox_preds: Tensor, batch_gt_instances: InstanceList, batch_img_metas: List[dict], dn_meta: Dict[str, int]) -> Tuple[List[Tensor]]: """Calculate denoising loss. Args: all_layers_denoising_cls_scores (Tensor): Classification scores of all decoder layers in denoising part, has shape ( num_decoder_layers, bs, num_denoising_queries, cls_out_channels). 
all_layers_denoising_bbox_preds (Tensor): Regression outputs of all decoder layers in denoising part. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (num_decoder_layers, bs, num_denoising_queries, 4). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. It will be used for split outputs of denoising and matching parts and loss calculation. Returns: Tuple[List[Tensor]]: The loss_dn_cls, loss_dn_bbox, and loss_dn_iou of each decoder layers. """ return multi_apply( self._loss_dn_single, all_layers_denoising_cls_scores, all_layers_denoising_bbox_preds, batch_gt_instances=batch_gt_instances, batch_img_metas=batch_img_metas, dn_meta=dn_meta) def _loss_dn_single(self, dn_cls_scores: Tensor, dn_bbox_preds: Tensor, batch_gt_instances: InstanceList, batch_img_metas: List[dict], dn_meta: Dict[str, int]) -> Tuple[Tensor]: """Denoising loss for outputs from a single decoder layer. Args: dn_cls_scores (Tensor): Classification scores of a single decoder layer in denoising part, has shape (bs, num_denoising_queries, cls_out_channels). dn_bbox_preds (Tensor): Regression outputs of a single decoder layer in denoising part. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (bs, num_denoising_queries, 4). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. It will be used for split outputs of denoising and matching parts and loss calculation. Returns: Tuple[Tensor]: A tuple including `loss_cls`, `loss_box` and `loss_iou`. 
""" cls_reg_targets = self.get_dn_targets(batch_gt_instances, batch_img_metas, dn_meta) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets labels = torch.cat(labels_list, 0) label_weights = torch.cat(label_weights_list, 0) bbox_targets = torch.cat(bbox_targets_list, 0) bbox_weights = torch.cat(bbox_weights_list, 0) # classification loss cls_scores = dn_cls_scores.reshape(-1, self.cls_out_channels) # construct weighted avg_factor to match with the official DETR repo cls_avg_factor = \ num_total_pos * 1.0 + num_total_neg * self.bg_cls_weight if self.sync_cls_avg_factor: cls_avg_factor = reduce_mean( cls_scores.new_tensor([cls_avg_factor])) cls_avg_factor = max(cls_avg_factor, 1) if len(cls_scores) > 0: loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=cls_avg_factor) else: loss_cls = torch.zeros( 1, dtype=cls_scores.dtype, device=cls_scores.device) # Compute the average number of gt boxes across all gpus, for # normalization purposes num_total_pos = loss_cls.new_tensor([num_total_pos]) num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() # construct factors used for rescale bboxes factors = [] for img_meta, bbox_pred in zip(batch_img_metas, dn_bbox_preds): img_h, img_w = img_meta['img_shape'] factor = bbox_pred.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0).repeat( bbox_pred.size(0), 1) factors.append(factor) factors = torch.cat(factors) # DETR regress the relative position of boxes (cxcywh) in the image, # thus the learning target is normalized by the image size. So here # we need to re-scale them for calculating IoU loss bbox_preds = dn_bbox_preds.reshape(-1, 4) bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors # regression IoU loss, defaultly GIoU loss loss_iou = self.loss_iou( bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) # regression L1 loss loss_bbox = self.loss_bbox( bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) return loss_cls, loss_bbox, loss_iou def get_dn_targets(self, batch_gt_instances: InstanceList, batch_img_metas: dict, dn_meta: Dict[str, int]) -> tuple: """Get targets in denoising part for a batch of images. Args: batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. It will be used for split outputs of denoising and matching parts and loss calculation. Returns: tuple: a tuple containing the following targets. - labels_list (list[Tensor]): Labels for all images. - label_weights_list (list[Tensor]): Label weights for all images. - bbox_targets_list (list[Tensor]): BBox targets for all images. - bbox_weights_list (list[Tensor]): BBox weights for all images. - num_total_pos (int): Number of positive samples in all images. - num_total_neg (int): Number of negative samples in all images. 
""" (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( self._get_dn_targets_single, batch_gt_instances, batch_img_metas, dn_meta=dn_meta) num_total_pos = sum((inds.numel() for inds in pos_inds_list)) num_total_neg = sum((inds.numel() for inds in neg_inds_list)) return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def _get_dn_targets_single(self, gt_instances: InstanceData, img_meta: dict, dn_meta: Dict[str, int]) -> tuple: """Get targets in denoising part for one image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for one image. dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. It will be used for split outputs of denoising and matching parts and loss calculation. Returns: tuple[Tensor]: a tuple containing the following for one image. - labels (Tensor): Labels of each image. - label_weights (Tensor]): Label weights of each image. - bbox_targets (Tensor): BBox targets of each image. - bbox_weights (Tensor): BBox weights of each image. - pos_inds (Tensor): Sampled positive indices for each image. - neg_inds (Tensor): Sampled negative indices for each image. """ gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels num_groups = dn_meta['num_denoising_groups'] num_denoising_queries = dn_meta['num_denoising_queries'] num_queries_each_group = int(num_denoising_queries / num_groups) device = gt_bboxes.device if len(gt_labels) > 0: t = torch.arange(len(gt_labels), dtype=torch.long, device=device) t = t.unsqueeze(0).repeat(num_groups, 1) pos_assigned_gt_inds = t.flatten() pos_inds = torch.arange( num_groups, dtype=torch.long, device=device) pos_inds = pos_inds.unsqueeze(1) * num_queries_each_group + t pos_inds = pos_inds.flatten() else: pos_inds = pos_assigned_gt_inds = \ gt_bboxes.new_tensor([], dtype=torch.long) neg_inds = pos_inds + num_queries_each_group // 2 # label targets labels = gt_bboxes.new_full((num_denoising_queries, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[pos_assigned_gt_inds] label_weights = gt_bboxes.new_ones(num_denoising_queries) # bbox targets bbox_targets = torch.zeros(num_denoising_queries, 4, device=device) bbox_weights = torch.zeros(num_denoising_queries, 4, device=device) bbox_weights[pos_inds] = 1.0 img_h, img_w = img_meta['img_shape'] # DETR regress the relative position of boxes (cxcywh) in the image. # Thus the learning target should be normalized by the image size, also # the box format should be converted from defaultly x1y1x2y2 to cxcywh. factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) gt_bboxes_normalized = gt_bboxes / factor gt_bboxes_targets = bbox_xyxy_to_cxcywh(gt_bboxes_normalized) bbox_targets[pos_inds] = gt_bboxes_targets.repeat([num_groups, 1]) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) @staticmethod def split_outputs(all_layers_cls_scores: Tensor, all_layers_bbox_preds: Tensor, dn_meta: Dict[str, int]) -> Tuple[Tensor]: """Split outputs of the denoising part and the matching part. 
For the total outputs of `num_queries_total` length, the former `num_denoising_queries` outputs are from denoising queries, and the rest `num_matching_queries` ones are from matching queries, where `num_queries_total` is the sum of `num_denoising_queries` and `num_matching_queries`. Args: all_layers_cls_scores (Tensor): Classification scores of all decoder layers, has shape (num_decoder_layers, bs, num_queries_total, cls_out_channels). all_layers_bbox_preds (Tensor): Regression outputs of all decoder layers. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (num_decoder_layers, bs, num_queries_total, 4). dn_meta (Dict[str, int]): The dictionary saves information about group collation, including 'num_denoising_queries' and 'num_denoising_groups'. Returns: Tuple[Tensor]: a tuple containing the following outputs. - all_layers_matching_cls_scores (Tensor): Classification scores of all decoder layers in matching part, has shape (num_decoder_layers, bs, num_matching_queries, cls_out_channels). - all_layers_matching_bbox_preds (Tensor): Regression outputs of all decoder layers in matching part. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (num_decoder_layers, bs, num_matching_queries, 4). - all_layers_denoising_cls_scores (Tensor): Classification scores of all decoder layers in denoising part, has shape (num_decoder_layers, bs, num_denoising_queries, cls_out_channels). - all_layers_denoising_bbox_preds (Tensor): Regression outputs of all decoder layers in denoising part. Each is a 4D-tensor with normalized coordinate format (cx, cy, w, h) and has shape (num_decoder_layers, bs, num_denoising_queries, 4). """ num_denoising_queries = dn_meta['num_denoising_queries'] if dn_meta is not None: all_layers_denoising_cls_scores = \ all_layers_cls_scores[:, :, : num_denoising_queries, :] all_layers_denoising_bbox_preds = \ all_layers_bbox_preds[:, :, : num_denoising_queries, :] all_layers_matching_cls_scores = \ all_layers_cls_scores[:, :, num_denoising_queries:, :] all_layers_matching_bbox_preds = \ all_layers_bbox_preds[:, :, num_denoising_queries:, :] else: all_layers_denoising_cls_scores = None all_layers_denoising_bbox_preds = None all_layers_matching_cls_scores = all_layers_cls_scores all_layers_matching_bbox_preds = all_layers_bbox_preds return (all_layers_matching_cls_scores, all_layers_matching_bbox_preds, all_layers_denoising_cls_scores, all_layers_denoising_bbox_preds)
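# --- Illustrative sketch (not part of dino_head.py) ----------------------------
# What DINOHead.split_outputs above does, with toy shapes: the first
# `num_denoising_queries` entries along the query axis belong to the denoising
# part, the rest are the matching queries. The sizes below are arbitrary.
import torch

num_layers, bs, num_dn, num_match, num_classes = 6, 2, 60, 900, 80
dn_meta = {'num_denoising_queries': num_dn, 'num_denoising_groups': 5}

all_layers_cls_scores = torch.randn(num_layers, bs, num_dn + num_match, num_classes)
all_layers_bbox_preds = torch.randn(num_layers, bs, num_dn + num_match, 4)

split = dn_meta['num_denoising_queries']
dn_cls, match_cls = all_layers_cls_scores[:, :, :split, :], all_layers_cls_scores[:, :, split:, :]
dn_box, match_box = all_layers_bbox_preds[:, :, :split, :], all_layers_bbox_preds[:, :, split:, :]

assert dn_cls.shape == (num_layers, bs, num_dn, num_classes)
assert match_box.shape == (num_layers, bs, num_match, 4)
# The denoising slices feed loss_dn(); the matching slices go through the
# standard DETR-style Hungarian-matched losses.
# -------------------------------------------------------------------------------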
23,047
49.43326
79
py
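To make the index layout in `_get_dn_targets_single` above concrete: each denoising group occupies `num_queries_each_group` consecutive slots, with positive (noised ground-truth) queries in the first half and their negative counterparts in the second half. The numbers below are invented, and the assumption that each group holds `2 * num_gts` queries follows the usual DN-DETR style grouping rather than anything stated in this file.

import torch

num_gts, num_groups = 3, 2
num_queries_each_group = 2 * num_gts           # positive half + negative half
num_denoising_queries = num_groups * num_queries_each_group

t = torch.arange(num_gts).unsqueeze(0).repeat(num_groups, 1)
group_offsets = torch.arange(num_groups).unsqueeze(1) * num_queries_each_group
pos_inds = (group_offsets + t).flatten()
neg_inds = pos_inds + num_queries_each_group // 2

print(pos_inds.tolist())  # [0, 1, 2, 6, 7, 8]
print(neg_inds.tolist())  # [3, 4, 5, 9, 10, 11]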
ERD
ERD-main/mmdet/models/dense_heads/pisa_retinanet_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List import torch from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import InstanceList, OptInstanceList from ..losses import carl_loss, isr_p from ..utils import images_to_levels from .retina_head import RetinaHead @MODELS.register_module() class PISARetinaHead(RetinaHead): """PISA Retinanet Head. The head owns the same structure with Retinanet Head, but differs in two aspects: 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to change the positive loss weights. 2. Classification-aware regression loss is adopted as a third loss. """ def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: Loss dict, comprise classification loss, regression loss and carl loss. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, return_sampling_results=True) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor, sampling_results_list) = cls_reg_targets # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) num_imgs = len(batch_img_metas) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels) for cls_score in cls_scores ] flatten_cls_scores = torch.cat( flatten_cls_scores, dim=1).reshape(-1, flatten_cls_scores[0].size(-1)) flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for bbox_pred in bbox_preds ] flatten_bbox_preds = torch.cat( flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1)) flatten_labels = torch.cat(labels_list, dim=1).reshape(-1) flatten_label_weights = torch.cat( label_weights_list, dim=1).reshape(-1) flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4) flatten_bbox_targets = torch.cat( bbox_targets_list, dim=1).reshape(-1, 4) flatten_bbox_weights = torch.cat( bbox_weights_list, dim=1).reshape(-1, 4) # Apply ISR-P isr_cfg = self.train_cfg.get('isr', None) if isr_cfg is not None: all_targets = (flatten_labels, 
flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) with torch.no_grad(): all_targets = isr_p( flatten_cls_scores, flatten_bbox_preds, all_targets, flatten_anchors, sampling_results_list, bbox_coder=self.bbox_coder, loss_cls=self.loss_cls, num_class=self.num_classes, **self.train_cfg['isr']) (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) = all_targets # For convenience we compute loss once instead separating by fpn level, # so that we don't need to separate the weights by level again. # The result should be the same losses_cls = self.loss_cls( flatten_cls_scores, flatten_labels, flatten_label_weights, avg_factor=avg_factor) losses_bbox = self.loss_bbox( flatten_bbox_preds, flatten_bbox_targets, flatten_bbox_weights, avg_factor=avg_factor) loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) # CARL Loss carl_cfg = self.train_cfg.get('carl', None) if carl_cfg is not None: loss_carl = carl_loss( flatten_cls_scores, flatten_labels, flatten_bbox_preds, flatten_bbox_targets, self.loss_bbox, **self.train_cfg['carl'], avg_factor=avg_factor, sigmoid=True, num_class=self.num_classes) loss_dict.update(loss_carl) return loss_dict
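The flattening that `loss_by_feat` performs above (per-level score maps reshaped and concatenated so ISR-P and a single loss call can see every anchor at once) can be reproduced in isolation; the shapes below are made up.

import torch

num_imgs, num_classes, num_base_anchors = 2, 80, 9
# Two feature levels with different spatial sizes.
cls_scores = [torch.rand(num_imgs, num_base_anchors * num_classes, 32, 32),
              torch.rand(num_imgs, num_base_anchors * num_classes, 16, 16)]

flat = [s.permute(0, 2, 3, 1).reshape(num_imgs, -1, num_classes)
        for s in cls_scores]
flat = torch.cat(flat, dim=1).reshape(-1, num_classes)

# One row per (image, location, anchor) triple across every level.
num_anchors_total = num_imgs * num_base_anchors * (32 * 32 + 16 * 16)
assert flat.shape == (num_anchors_total, num_classes)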
6,298
39.63871
79
py
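The classification-aware regression (CARL) term added above couples the two branches by scaling each positive sample's regression loss with its predicted score for the assigned class, so regression quality also back-propagates into the classification branch. The snippet below is only a rough sketch of that idea with invented weighting constants; it is not the actual `carl_loss` from mmdet.

import torch

def carl_like_weighting(pos_cls_scores, pos_reg_losses, k=1.0, bias=0.2):
    # pos_cls_scores: sigmoid score of each positive for its assigned GT class.
    # pos_reg_losses: unreduced regression loss per positive sample.
    # Weights are NOT detached: letting the regression quality gradient reach
    # the classification branch is the point of a classification-aware loss.
    weights = (bias + (1 - bias) * pos_cls_scores) ** k
    weights = weights / weights.mean().clamp(min=1e-6)  # keep the loss scale stable
    return (weights * pos_reg_losses).mean()

scores = torch.tensor([0.9, 0.3, 0.6], requires_grad=True)
reg_losses = torch.tensor([0.5, 0.8, 0.2])
loss = carl_like_weighting(scores, reg_losses)
loss.backward()
print(loss, scores.grad)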
ERD
ERD-main/mmdet/models/dense_heads/gfl_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Sequence, Tuple import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList, reduce_mean) from .anchor_head import AnchorHead from ..task_modules.prior_generators import anchor_inside_flags from ..task_modules.samplers import PseudoSampler from ..utils import (filter_scores_and_topk, images_to_levels, multi_apply, unmap) from ..utils import unpack_gt_instances INF = 100000.0 EPS = 1.0e-7 class Integral(nn.Module): """A fixed layer for calculating integral result from distribution. This layer calculates the target location by :math: ``sum{P(y_i) * y_i}``, P(y_i) denotes the softmax vector that represents the discrete distribution y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max} Args: reg_max (int): The maximal value of the discrete set. Defaults to 16. You may want to reset it according to your new dataset or related settings. """ def __init__(self, reg_max: int = 16) -> None: super().__init__() self.reg_max = reg_max self.register_buffer('project', torch.linspace(0, self.reg_max, self.reg_max + 1)) def forward(self, x: Tensor) -> Tensor: """Forward feature from the regression head to get integral result of bounding box location. Args: x (Tensor): Features of the regression head, shape (N, 4*(n+1)), n is self.reg_max. Returns: x (Tensor): Integral result of box locations, i.e., distance offsets from the box center in four directions, shape (N, 4). """ x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) x = F.linear(x, self.project.type_as(x)).reshape(-1, 4) return x @MODELS.register_module() class GFLHead(AnchorHead): """Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection. GFL head structure is similar with ATSS, however GFL uses 1) joint representation for classification and localization quality, and 2) flexible General distribution for bounding box locations, which are supervised by Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively https://arxiv.org/abs/2006.04388 Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Defaults to 4. conv_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct and config conv layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and config norm layer. Default: dict(type='GN', num_groups=32, requires_grad=True). loss_qfl (:obj:`ConfigDict` or dict): Config of Quality Focal Loss (QFL). bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. Defaults to 'DistancePointBBoxCoder'. reg_max (int): Max value of integral set :math: ``{0, ..., reg_max}`` in QFL setting. Defaults to 16. init_cfg (:obj:`ConfigDict` or dict or list[dict] or list[:obj:`ConfigDict`]): Initialization config dict. 
Example: >>> self = GFLHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_quality_score, bbox_pred = self.forward(feats) >>> assert len(cls_quality_score) == len(self.scales) """ def __init__(self, num_classes: int, in_channels: int, stacked_convs: int = 4, conv_cfg: OptConfigType = None, norm_cfg: ConfigType = dict( type='GN', num_groups=32, requires_grad=True), loss_dfl: ConfigType = dict( type='DistributionFocalLoss', loss_weight=0.25), bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), reg_max: int = 16, init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='gfl_cls', std=0.01, bias_prob=0.01)), **kwargs) -> None: self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_max = reg_max super().__init__( num_classes=num_classes, in_channels=in_channels, bbox_coder=bbox_coder, init_cfg=init_cfg, **kwargs) if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) if self.train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler(context=self) self.integral = Integral(self.reg_max) self.loss_dfl = MODELS.build(loss_dfl) iou_calculator = dict(type='BboxOverlaps2D') self.iou_calculator = TASK_UTILS.build(iou_calculator) # for import v3 self.loss_cls_for_replay_v3 = MODELS.build(dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU() self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) assert self.num_anchors == 1, 'anchor free version' self.gfl_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.gfl_reg = nn.Conv2d( self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification and quality (IoU) joint scores for all scale levels, each is a 4D-tensor, the channel number is num_classes. - bbox_preds (list[Tensor]): Box distribution logits for all scale levels, each is a 4D-tensor, the channel number is 4*(n+1), n is max value of integral set. """ return multi_apply(self.forward_single, x, self.scales) def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: - cls_score (Tensor): Cls and quality joint scores for a single scale level the channel number is num_classes. - bbox_pred (Tensor): Box distribution logits for a single scale level, the channel number is 4*(n+1), n is max value of integral set. 
""" cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.gfl_cls(cls_feat) bbox_pred = scale(self.gfl_reg(reg_feat)).float() return cls_score, bbox_pred def anchor_center(self, anchors: Tensor) -> Tensor: """Get anchor centers from anchors. Args: anchors (Tensor): Anchor list with shape (N, 4), ``xyxy`` format. Returns: Tensor: Anchor centers with shape (N, 2), ``xy`` format. """ anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2 anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2 return torch.stack([anchors_cx, anchors_cy], dim=-1) def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor, bbox_pred: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, stride: Tuple[int], avg_factor: int) -> dict: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). stride (Tuple[int]): Stride in this scale level. avg_factor (int): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
anchors = anchors.reshape(-1, 4) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) score = label_weights.new_zeros(labels.shape) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] weight_targets = cls_score.detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][pos_inds] pos_bbox_pred_corners = self.integral(pos_bbox_pred) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchor_centers, pos_bbox_pred_corners) pos_decode_bbox_targets = pos_bbox_targets / stride[0] score[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) target_corners = self.bbox_coder.encode(pos_anchor_centers, pos_decode_bbox_targets, self.reg_max).reshape(-1) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=weight_targets, avg_factor=1.0) # dfl loss loss_dfl = self.loss_dfl( pred_corners, target_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) else: loss_bbox = bbox_pred.sum() * 0 loss_dfl = bbox_pred.sum() * 0 weight_targets = bbox_pred.new_tensor(0) # cls (qfl) loss loss_cls = self.loss_cls( cls_score, (labels, score), weight=label_weights, avg_factor=avg_factor) return loss_cls, loss_bbox, loss_dfl, weight_targets.sum() def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Cls and quality scores for each scale level has shape (N, num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() losses_cls, losses_bbox, losses_dfl, \ avg_factor = multi_apply( self.loss_by_feat_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, self.prior_generator.strides, avg_factor=avg_factor) avg_factor = sum(avg_factor) avg_factor = reduce_mean(avg_factor).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl) def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image. GFL head does not need this value. mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (:obj: `ConfigDict`): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor]: Results of detected bboxes and labels. If with_nms is False and mlvl_score_factor is None, return mlvl_bboxes and mlvl_scores, else return mlvl_bboxes, mlvl_scores and mlvl_score_factor. Usually with_nms is False is used for aug test. If with_nms is True, then return the following format - det_bboxes (Tensor): Predicted bboxes with shape [num_bboxes, 5], where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column are scores between 0 and 1. - det_labels (Tensor): Predicted labels of the corresponding box with shape [num_bboxes]. 
""" cfg = self.test_cfg if cfg is None else cfg img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, stride, priors) in enumerate( zip(cls_score_list, bbox_pred_list, self.prior_generator.strides, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] assert stride[0] == stride[1] bbox_pred = bbox_pred.permute(1, 2, 0) bbox_pred = self.integral(bbox_pred) * stride[0] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self.bbox_coder.decode( self.anchor_center(priors), bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) results = InstanceData() results.bboxes = torch.cat(mlvl_bboxes) results.scores = torch.cat(mlvl_scores) results.labels = torch.cat(mlvl_labels) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) def get_targets(self, anchor_list: List[Tensor], valid_flag_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs=True) -> tuple: """Get targets for GFL head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. """ num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._get_targets_single, anchor_list, valid_flag_list, num_level_anchors_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) # Get `avg_factor` of all images, which calculate in `SamplingResult`. # When using sampling method, avg_factor is usually the sum of # positive and negative priors. When using `PseudoSampler`, # `avg_factor` is usually equal to the number of positive priors. avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. 
multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors) labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) def _get_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, num_level_anchors: List[int], gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors, 4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). num_level_anchors (list[int]): Number of anchors of each scale level. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: N is the number of total anchors in the image. - anchors (Tensor): All anchors in the image with shape (N, 4). - labels (Tensor): Labels of all anchors in the image with shape (N,). - label_weights (Tensor): Label weights of all anchor in the image with shape (N,). - bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). - bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4). - pos_inds (Tensor): Indices of positive anchor with shape (num_pos,). - neg_inds (Tensor): Indices of negative anchor with shape (num_neg,). - sampling_result (:obj:`SamplingResult`): Sampling results. """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. 
Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) pred_instances = InstanceData(priors=anchors) assign_result = self.assigner.assign( pred_instances=pred_instances, num_level_priors=num_level_anchors_inside, gt_instances=gt_instances, gt_instances_ignore=gt_instances_ignore) sampling_result = self.sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors,), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def get_num_level_anchors_inside(self, num_level_anchors: List[int], inside_flags: Tensor) -> List[int]: """Get the number of valid anchors in every level.""" split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside
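The `Integral` module defined near the top of `gfl_head.py` turns a discrete distribution over `{0, ..., reg_max}` into a single offset by taking its expectation; a standalone sketch with arbitrary logits:

import torch
import torch.nn.functional as F

reg_max = 16
project = torch.linspace(0, reg_max, reg_max + 1)      # [0, 1, ..., 16]

# Logits for one box: four sides, each modelled by reg_max + 1 bins.
logits = torch.randn(1, 4 * (reg_max + 1))
probs = F.softmax(logits.reshape(-1, reg_max + 1), dim=1)
offsets = (probs @ project).reshape(-1, 4)

# Each offset is sum_i P(y_i) * y_i, i.e. the expected distance to one side.
assert offsets.shape == (1, 4)
print(offsets)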
29,890
42.957353
116
py
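The `pred_corners`/`target_corners` pair above feeds a distribution focal loss, which supervises each continuous target distance by splitting cross-entropy weight across its two neighbouring integer bins. The sketch below follows the formulation from the GFL paper; it is not mmdet's `DistributionFocalLoss` class, and targets are assumed to lie strictly inside `[0, reg_max)`.

import torch
import torch.nn.functional as F

def dfl_sketch(pred_logits, target):
    # pred_logits: (N, reg_max + 1) raw bin logits.
    # target: (N,) continuous distances, assumed to lie in [0, reg_max).
    left = target.long()                        # lower neighbouring bin
    right = left + 1                            # upper neighbouring bin
    weight_left = right.float() - target        # closer bin gets the larger weight
    weight_right = target - left.float()
    loss = (F.cross_entropy(pred_logits, left, reduction='none') * weight_left +
            F.cross_entropy(pred_logits, right, reduction='none') * weight_right)
    return loss.mean()

logits = torch.randn(5, 17)                     # reg_max = 16
target = torch.rand(5) * 15.9
print(dfl_sketch(logits, target))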
ERD
ERD-main/mmdet/models/dense_heads/centernet_update_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Optional, Sequence, Tuple import torch import torch.nn as nn from mmcv.cnn import Scale from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures.bbox import bbox2distance from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, reduce_mean) from ..utils import multi_apply from .anchor_free_head import AnchorFreeHead INF = 1000000000 RangeType = Sequence[Tuple[int, int]] def _transpose(tensor_list: List[Tensor], num_point_list: list) -> List[Tensor]: """This function is used to transpose image first tensors to level first ones.""" for img_idx in range(len(tensor_list)): tensor_list[img_idx] = torch.split( tensor_list[img_idx], num_point_list, dim=0) tensors_level_first = [] for targets_per_level in zip(*tensor_list): tensors_level_first.append(torch.cat(targets_per_level, dim=0)) return tensors_level_first @MODELS.register_module() class CenterNetUpdateHead(AnchorFreeHead): """CenterNetUpdateHead is an improved version of CenterNet in CenterNet2. Paper link `<https://arxiv.org/abs/2103.07461>`_. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channel in the input feature map. regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple level points. hm_min_radius (int): Heatmap target minimum radius of cls branch. Defaults to 4. hm_min_overlap (float): Heatmap target minimum overlap of cls branch. Defaults to 0.8. more_pos_thresh (float): The filtering threshold when the cls branch adds more positive samples. Defaults to 0.2. more_pos_topk (int): The maximum number of additional positive samples added to each gt. Defaults to 9. soft_weight_on_reg (bool): Whether to use the soft target of the cls branch as the soft weight of the bbox branch. Defaults to False. loss_cls (:obj:`ConfigDict` or dict): Config of cls loss. Defaults to dict(type='GaussianFocalLoss', loss_weight=1.0) loss_bbox (:obj:`ConfigDict` or dict): Config of bbox loss. Defaults to dict(type='GIoULoss', loss_weight=2.0). norm_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct and config norm layer. Defaults to ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``. train_cfg (:obj:`ConfigDict` or dict, optional): Training config. Unused in CenterNet. Reserved for compatibility with SingleStageDetector. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of CenterNet. 
""" def __init__(self, num_classes: int, in_channels: int, regress_ranges: RangeType = ((0, 80), (64, 160), (128, 320), (256, 640), (512, INF)), hm_min_radius: int = 4, hm_min_overlap: float = 0.8, more_pos_thresh: float = 0.2, more_pos_topk: int = 9, soft_weight_on_reg: bool = False, loss_cls: ConfigType = dict( type='GaussianFocalLoss', pos_weight=0.25, neg_weight=0.75, loss_weight=1.0), loss_bbox: ConfigType = dict( type='GIoULoss', loss_weight=2.0), norm_cfg: OptConfigType = dict( type='GN', num_groups=32, requires_grad=True), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, **kwargs) -> None: super().__init__( num_classes=num_classes, in_channels=in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, norm_cfg=norm_cfg, train_cfg=train_cfg, test_cfg=test_cfg, **kwargs) self.soft_weight_on_reg = soft_weight_on_reg self.hm_min_radius = hm_min_radius self.more_pos_thresh = more_pos_thresh self.more_pos_topk = more_pos_topk self.delta = (1 - hm_min_overlap) / (1 + hm_min_overlap) self.sigmoid_clamp = 0.0001 # GaussianFocalLoss must be sigmoid mode self.use_sigmoid_cls = True self.cls_out_channels = num_classes self.regress_ranges = regress_ranges self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) def _init_predictor(self) -> None: """Initialize predictor layers of the head.""" self.conv_cls = nn.Conv2d( self.feat_channels, self.num_classes, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of each level outputs. - cls_scores (list[Tensor]): Box scores for each scale level, \ each is a 4D-tensor, the channel number is num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each \ scale level, each is a 4D-tensor, the channel number is 4. """ return multi_apply(self.forward_single, x, self.scales, self.strides) def forward_single(self, x: Tensor, scale: Scale, stride: int) -> Tuple[Tensor, Tensor]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps. Returns: tuple: scores for each class, bbox predictions of input feature maps. """ cls_score, bbox_pred, _, _ = super().forward_single(x) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() # bbox_pred needed for gradient computation has been modified # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace # F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) if not self.training: bbox_pred *= stride return cls_score, bbox_pred def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. 
It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = cls_scores[0].size(0) assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) # 1 flatten outputs flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) assert (torch.isfinite(flatten_bbox_preds).all().item()) # 2 calc reg and cls branch targets cls_targets, bbox_targets = self.get_targets(all_level_points, batch_gt_instances) # 3 add more pos index for cls branch featmap_sizes = flatten_points.new_tensor(featmap_sizes) pos_inds, cls_labels = self.add_cls_pos_inds(flatten_points, flatten_bbox_preds, featmap_sizes, batch_gt_instances) # 4 calc cls loss if pos_inds is None: # num_gts=0 num_pos_cls = bbox_preds[0].new_tensor(0, dtype=torch.float) else: num_pos_cls = bbox_preds[0].new_tensor( len(pos_inds), dtype=torch.float) num_pos_cls = max(reduce_mean(num_pos_cls), 1.0) flatten_cls_scores = flatten_cls_scores.sigmoid().clamp( min=self.sigmoid_clamp, max=1 - self.sigmoid_clamp) cls_loss = self.loss_cls( flatten_cls_scores, cls_targets, pos_inds=pos_inds, pos_labels=cls_labels, avg_factor=num_pos_cls) # 5 calc reg loss pos_bbox_inds = torch.nonzero( bbox_targets.max(dim=1)[0] >= 0).squeeze(1) pos_bbox_preds = flatten_bbox_preds[pos_bbox_inds] pos_bbox_targets = bbox_targets[pos_bbox_inds] bbox_weight_map = cls_targets.max(dim=1)[0] bbox_weight_map = bbox_weight_map[pos_bbox_inds] bbox_weight_map = bbox_weight_map if self.soft_weight_on_reg \ else torch.ones_like(bbox_weight_map) num_pos_bbox = max(reduce_mean(bbox_weight_map.sum()), 1.0) if len(pos_bbox_inds) > 0: pos_points = flatten_points[pos_bbox_inds] pos_decoded_bbox_preds = self.bbox_coder.decode( pos_points, pos_bbox_preds) pos_decoded_target_preds = self.bbox_coder.decode( pos_points, pos_bbox_targets) bbox_loss = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds, weight=bbox_weight_map, avg_factor=num_pos_bbox) else: bbox_loss = flatten_bbox_preds.sum() * 0 return dict(loss_cls=cls_loss, loss_bbox=bbox_loss) def get_targets( self, points: List[Tensor], batch_gt_instances: InstanceList, ) -> Tuple[Tensor, Tensor]: """Compute classification and bbox targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple: Targets of each level. - concat_lvl_labels (Tensor): Labels of all level and batch. - concat_lvl_bbox_targets (Tensor): BBox targets of all \ level and batch. 
""" assert len(points) == len(self.regress_ranges) num_levels = len(points) # the number of points per img, per lvl num_points = [center.size(0) for center in points] # expand regress ranges to align with points expanded_regress_ranges = [ points[i].new_tensor(self.regress_ranges[i])[None].expand_as( points[i]) for i in range(num_levels) ] # concat all levels points and regress ranges concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_points = torch.cat(points, dim=0) concat_strides = torch.cat([ concat_points.new_ones(num_points[i]) * self.strides[i] for i in range(num_levels) ]) # get labels and bbox_targets of each image cls_targets_list, bbox_targets_list = multi_apply( self._get_targets_single, batch_gt_instances, points=concat_points, regress_ranges=concat_regress_ranges, strides=concat_strides) bbox_targets_list = _transpose(bbox_targets_list, num_points) cls_targets_list = _transpose(cls_targets_list, num_points) concat_lvl_bbox_targets = torch.cat(bbox_targets_list, 0) concat_lvl_cls_targets = torch.cat(cls_targets_list, dim=0) return concat_lvl_cls_targets, concat_lvl_bbox_targets def _get_targets_single(self, gt_instances: InstanceData, points: Tensor, regress_ranges: Tensor, strides: Tensor) -> Tuple[Tensor, Tensor]: """Compute classification and bbox targets for a single image.""" num_points = points.size(0) num_gts = len(gt_instances) gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels if num_gts == 0: return gt_labels.new_full((num_points, self.num_classes), self.num_classes), \ gt_bboxes.new_full((num_points, 4), -1) # Calculate the regression tblr target corresponding to all points points = points[:, None].expand(num_points, num_gts, 2) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) strides = strides[:, None, None].expand(num_points, num_gts, 2) bbox_target = bbox2distance(points, gt_bboxes) # M x N x 4 # condition1: inside a gt bbox inside_gt_bbox_mask = bbox_target.min(dim=2)[0] > 0 # M x N # condition2: Calculate the nearest points from # the upper, lower, left and right ranges from # the center of the gt bbox centers = ((gt_bboxes[..., [0, 1]] + gt_bboxes[..., [2, 3]]) / 2) centers_discret = ((centers / strides).int() * strides).float() + \ strides / 2 centers_discret_dist = points - centers_discret dist_x = centers_discret_dist[..., 0].abs() dist_y = centers_discret_dist[..., 1].abs() inside_gt_center3x3_mask = (dist_x <= strides[..., 0]) & \ (dist_y <= strides[..., 0]) # condition3: limit the regression range for each location bbox_target_wh = bbox_target[..., :2] + bbox_target[..., 2:] crit = (bbox_target_wh**2).sum(dim=2)**0.5 / 2 inside_fpn_level_mask = (crit >= regress_ranges[:, [0]]) & \ (crit <= regress_ranges[:, [1]]) bbox_target_mask = inside_gt_bbox_mask & \ inside_gt_center3x3_mask & \ inside_fpn_level_mask # Calculate the distance weight map gt_center_peak_mask = ((centers_discret_dist**2).sum(dim=2) == 0) weighted_dist = ((points - centers)**2).sum(dim=2) # M x N weighted_dist[gt_center_peak_mask] = 0 areas = (gt_bboxes[..., 2] - gt_bboxes[..., 0]) * ( gt_bboxes[..., 3] - gt_bboxes[..., 1]) radius = self.delta**2 * 2 * areas radius = torch.clamp(radius, min=self.hm_min_radius**2) weighted_dist = weighted_dist / radius # Calculate bbox_target bbox_weighted_dist = weighted_dist.clone() bbox_weighted_dist[bbox_target_mask == 0] = INF * 1.0 min_dist, min_inds = bbox_weighted_dist.min(dim=1) bbox_target = bbox_target[range(len(bbox_target)), min_inds] # M x N x 4 --> M x 4 bbox_target[min_dist == INF] = -INF # 
Convert to feature map scale bbox_target /= strides[:, 0, :].repeat(1, 2) # Calculate cls_target cls_target = self._create_heatmaps_from_dist(weighted_dist, gt_labels) return cls_target, bbox_target @torch.no_grad() def add_cls_pos_inds( self, flatten_points: Tensor, flatten_bbox_preds: Tensor, featmap_sizes: Tensor, batch_gt_instances: InstanceList ) -> Tuple[Optional[Tensor], Optional[Tensor]]: """Provide additional adaptive positive samples to the classification branch. Args: flatten_points (Tensor): The point after flatten, including batch image and all levels. The shape is (N, 2). flatten_bbox_preds (Tensor): The bbox predicts after flatten, including batch image and all levels. The shape is (N, 4). featmap_sizes (Tensor): Feature map size of all layers. The shape is (5, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple: - pos_inds (Tensor): Adaptively selected positive sample index. - cls_labels (Tensor): Corresponding positive class label. """ outputs = self._get_center3x3_region_index_targets( batch_gt_instances, featmap_sizes) cls_labels, fpn_level_masks, center3x3_inds, \ center3x3_bbox_targets, center3x3_masks = outputs num_gts, total_level, K = cls_labels.shape[0], len( self.strides), center3x3_masks.shape[-1] if num_gts == 0: return None, None # The out-of-bounds index is forcibly set to 0 # to prevent loss calculation errors center3x3_inds[center3x3_masks == 0] = 0 reg_pred_center3x3 = flatten_bbox_preds[center3x3_inds] center3x3_points = flatten_points[center3x3_inds].view(-1, 2) center3x3_bbox_targets_expand = center3x3_bbox_targets.view( -1, 4).clamp(min=0) pos_decoded_bbox_preds = self.bbox_coder.decode( center3x3_points, reg_pred_center3x3.view(-1, 4)) pos_decoded_target_preds = self.bbox_coder.decode( center3x3_points, center3x3_bbox_targets_expand) center3x3_bbox_loss = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds, None, reduction_override='none').view(num_gts, total_level, K) / self.loss_bbox.loss_weight # Invalid index Loss set to infinity center3x3_bbox_loss[center3x3_masks == 0] = INF # 4 is the center point of the sampled 9 points, the center point # of gt bbox after discretization. # The center point of gt bbox after discretization # must be a positive sample, so we force its loss to be set to 0. 
center3x3_bbox_loss.view(-1, K)[fpn_level_masks.view(-1), 4] = 0 center3x3_bbox_loss = center3x3_bbox_loss.view(num_gts, -1) loss_thr = torch.kthvalue( center3x3_bbox_loss, self.more_pos_topk, dim=1)[0] loss_thr[loss_thr > self.more_pos_thresh] = self.more_pos_thresh new_pos = center3x3_bbox_loss < loss_thr.view(num_gts, 1) pos_inds = center3x3_inds.view(num_gts, -1)[new_pos] cls_labels = cls_labels.view(num_gts, 1).expand(num_gts, total_level * K)[new_pos] return pos_inds, cls_labels def _create_heatmaps_from_dist(self, weighted_dist: Tensor, cls_labels: Tensor) -> Tensor: """Generate heatmaps of classification branch based on weighted distance map.""" heatmaps = weighted_dist.new_zeros( (weighted_dist.shape[0], self.num_classes)) for c in range(self.num_classes): inds = (cls_labels == c) # N if inds.int().sum() == 0: continue heatmaps[:, c] = torch.exp(-weighted_dist[:, inds].min(dim=1)[0]) zeros = heatmaps[:, c] < 1e-4 heatmaps[zeros, c] = 0 return heatmaps def _get_center3x3_region_index_targets(self, bacth_gt_instances: InstanceList, shapes_per_level: Tensor) -> tuple: """Get the center (and the 3x3 region near center) locations and target of each objects.""" cls_labels = [] inside_fpn_level_masks = [] center3x3_inds = [] center3x3_masks = [] center3x3_bbox_targets = [] total_levels = len(self.strides) batch = len(bacth_gt_instances) shapes_per_level = shapes_per_level.long() area_per_level = (shapes_per_level[:, 0] * shapes_per_level[:, 1]) # Select a total of 9 positions of 3x3 in the center of the gt bbox # as candidate positive samples K = 9 dx = shapes_per_level.new_tensor([-1, 0, 1, -1, 0, 1, -1, 0, 1]).view(1, 1, K) dy = shapes_per_level.new_tensor([-1, -1, -1, 0, 0, 0, 1, 1, 1]).view(1, 1, K) regress_ranges = shapes_per_level.new_tensor(self.regress_ranges).view( len(self.regress_ranges), 2) # L x 2 strides = shapes_per_level.new_tensor(self.strides) start_coord_pre_level = [] _start = 0 for level in range(total_levels): start_coord_pre_level.append(_start) _start = _start + batch * area_per_level[level] start_coord_pre_level = shapes_per_level.new_tensor( start_coord_pre_level).view(1, total_levels, 1) area_per_level = area_per_level.view(1, total_levels, 1) for im_i in range(batch): gt_instance = bacth_gt_instances[im_i] gt_bboxes = gt_instance.bboxes gt_labels = gt_instance.labels num_gts = gt_bboxes.shape[0] if num_gts == 0: continue cls_labels.append(gt_labels) gt_bboxes = gt_bboxes[:, None].expand(num_gts, total_levels, 4) expanded_strides = strides[None, :, None].expand(num_gts, total_levels, 2) expanded_regress_ranges = regress_ranges[None].expand( num_gts, total_levels, 2) expanded_shapes_per_level = shapes_per_level[None].expand( num_gts, total_levels, 2) # calc reg_target centers = ((gt_bboxes[..., [0, 1]] + gt_bboxes[..., [2, 3]]) / 2) centers_inds = (centers / expanded_strides).long() centers_discret = centers_inds * expanded_strides \ + expanded_strides // 2 bbox_target = bbox2distance(centers_discret, gt_bboxes) # M x N x 4 # calc inside_fpn_level_mask bbox_target_wh = bbox_target[..., :2] + bbox_target[..., 2:] crit = (bbox_target_wh**2).sum(dim=2)**0.5 / 2 inside_fpn_level_mask = \ (crit >= expanded_regress_ranges[..., 0]) & \ (crit <= expanded_regress_ranges[..., 1]) inside_gt_bbox_mask = bbox_target.min(dim=2)[0] >= 0 inside_fpn_level_mask = inside_gt_bbox_mask & inside_fpn_level_mask inside_fpn_level_masks.append(inside_fpn_level_mask) # calc center3x3_ind and mask expand_ws = expanded_shapes_per_level[..., 1:2].expand( num_gts, total_levels, K) expand_hs = 
expanded_shapes_per_level[..., 0:1].expand( num_gts, total_levels, K) centers_inds_x = centers_inds[..., 0:1] centers_inds_y = centers_inds[..., 1:2] center3x3_idx = start_coord_pre_level + \ im_i * area_per_level + \ (centers_inds_y + dy) * expand_ws + \ (centers_inds_x + dx) center3x3_mask = \ ((centers_inds_y + dy) < expand_hs) & \ ((centers_inds_y + dy) >= 0) & \ ((centers_inds_x + dx) < expand_ws) & \ ((centers_inds_x + dx) >= 0) # recalc center3x3 region reg target bbox_target = bbox_target / expanded_strides.repeat(1, 1, 2) center3x3_bbox_target = bbox_target[..., None, :].expand( num_gts, total_levels, K, 4).clone() center3x3_bbox_target[..., 0] += dx center3x3_bbox_target[..., 1] += dy center3x3_bbox_target[..., 2] -= dx center3x3_bbox_target[..., 3] -= dy # update center3x3_mask center3x3_mask = center3x3_mask & ( center3x3_bbox_target.min(dim=3)[0] >= 0) # n x L x K center3x3_inds.append(center3x3_idx) center3x3_masks.append(center3x3_mask) center3x3_bbox_targets.append(center3x3_bbox_target) if len(inside_fpn_level_masks) > 0: cls_labels = torch.cat(cls_labels, dim=0) inside_fpn_level_masks = torch.cat(inside_fpn_level_masks, dim=0) center3x3_inds = torch.cat(center3x3_inds, dim=0).long() center3x3_bbox_targets = torch.cat(center3x3_bbox_targets, dim=0) center3x3_masks = torch.cat(center3x3_masks, dim=0) else: cls_labels = shapes_per_level.new_zeros(0).long() inside_fpn_level_masks = shapes_per_level.new_zeros( (0, total_levels)).bool() center3x3_inds = shapes_per_level.new_zeros( (0, total_levels, K)).long() center3x3_bbox_targets = shapes_per_level.new_zeros( (0, total_levels, K, 4)).float() center3x3_masks = shapes_per_level.new_zeros( (0, total_levels, K)).bool() return cls_labels, inside_fpn_level_masks, center3x3_inds, \ center3x3_bbox_targets, center3x3_masks
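The classification targets in `_get_targets_single` above come from a size-normalized squared distance turned into a Gaussian-like response via `exp(-weighted_dist)`, with the effective radius controlled by `hm_min_overlap` through `delta = (1 - overlap) / (1 + overlap)`. A toy sketch with made-up numbers:

import torch

hm_min_overlap, hm_min_radius = 0.8, 4
delta = (1 - hm_min_overlap) / (1 + hm_min_overlap)

area = torch.tensor([96. * 64.])                       # one GT box
radius = torch.clamp(delta ** 2 * 2 * area, min=hm_min_radius ** 2)

sq_dist = torch.tensor([0., 25., 400., 4000.])         # point-to-center squared distances
weighted_dist = sq_dist / radius
heatmap = torch.exp(-weighted_dist)                    # 1 at the centre, decaying outwards
print(heatmap)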
27,030
42.2496
79
py
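The 3x3 candidate sampling in `_get_center3x3_region_index_targets` above flattens every level's feature map row-major and offsets into it; with a single image and a single level the index arithmetic reduces to the following (sizes invented):

import torch

H, W = 8, 10                                   # one level, one image
dx = torch.tensor([-1, 0, 1, -1, 0, 1, -1, 0, 1])
dy = torch.tensor([-1, -1, -1, 0, 0, 0, 1, 1, 1])

cx, cy = 4, 3                                  # discretized GT centre cell
center3x3_idx = (cy + dy) * W + (cx + dx)      # row-major flattened indices
center3x3_mask = ((cy + dy) >= 0) & ((cy + dy) < H) & \
                 ((cx + dx) >= 0) & ((cx + dx) < W)
print(center3x3_idx.tolist())                  # nine candidates around the centre
print(center3x3_mask.tolist())                 # False would mark out-of-bounds cells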
ERD
ERD-main/mmdet/models/dense_heads/corner_head.py
# Copyright (c) OpenMMLab. All rights reserved. from logging import warning from math import ceil, log from typing import List, Optional, Sequence, Tuple import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import CornerPool, batched_nms from mmengine.config import ConfigDict from mmengine.model import BaseModule, bias_init_with_prob from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, OptMultiConfig) from ..utils import (gather_feat, gaussian_radius, gen_gaussian_target, get_local_maximum, get_topk_from_heatmap, multi_apply, transpose_and_gather_feat) from .base_dense_head import BaseDenseHead class BiCornerPool(BaseModule): """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.) Args: in_channels (int): Input channels of module. directions (list[str]): Directions of two CornerPools. out_channels (int): Output channels of module. feat_channels (int): Feature channels of module. norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and config norm layer. init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. """ def __init__(self, in_channels: int, directions: List[int], feat_channels: int = 128, out_channels: int = 128, norm_cfg: ConfigType = dict(type='BN', requires_grad=True), init_cfg: OptMultiConfig = None) -> None: super().__init__(init_cfg) self.direction1_conv = ConvModule( in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) self.direction2_conv = ConvModule( in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) self.aftpool_conv = ConvModule( feat_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg, act_cfg=None) self.conv1 = ConvModule( in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) self.conv2 = ConvModule( in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg) self.direction1_pool = CornerPool(directions[0]) self.direction2_pool = CornerPool(directions[1]) self.relu = nn.ReLU(inplace=True) def forward(self, x: Tensor) -> Tensor: """Forward features from the upstream network. Args: x (tensor): Input feature of BiCornerPool. Returns: conv2 (tensor): Output feature of BiCornerPool. """ direction1_conv = self.direction1_conv(x) direction2_conv = self.direction2_conv(x) direction1_feat = self.direction1_pool(direction1_conv) direction2_feat = self.direction2_pool(direction2_conv) aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat) conv1 = self.conv1(x) relu = self.relu(aftpool_conv + conv1) conv2 = self.conv2(relu) return conv2 @MODELS.register_module() class CornerHead(BaseDenseHead): """Head of CornerNet: Detecting Objects as Paired Keypoints. Code is modified from the `official github repo <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/ kp.py#L73>`_ . More details can be found in the `paper <https://arxiv.org/abs/1808.01244>`_ . Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. num_feat_levels (int): Levels of feature from the previous module. 2 for HourglassNet-104 and 1 for HourglassNet-52. Because HourglassNet-104 outputs the final feature and intermediate supervision feature and HourglassNet-52 only outputs the final feature. Defaults to 2. corner_emb_channels (int): Channel of embedding vector. Defaults to 1. train_cfg (:obj:`ConfigDict` or dict, optional): Training config. 
Useless in CornerHead, but we keep this variable for SingleStageDetector. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of CornerHead. loss_heatmap (:obj:`ConfigDict` or dict): Config of corner heatmap loss. Defaults to GaussianFocalLoss. loss_embedding (:obj:`ConfigDict` or dict): Config of corner embedding loss. Defaults to AssociativeEmbeddingLoss. loss_offset (:obj:`ConfigDict` or dict): Config of corner offset loss. Defaults to SmoothL1Loss. init_cfg (:obj:`ConfigDict` or dict, optional): the config to control the initialization. """ def __init__(self, num_classes: int, in_channels: int, num_feat_levels: int = 2, corner_emb_channels: int = 1, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, loss_heatmap: ConfigType = dict( type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), loss_embedding: ConfigType = dict( type='AssociativeEmbeddingLoss', pull_weight=0.25, push_weight=0.25), loss_offset: ConfigType = dict( type='SmoothL1Loss', beta=1.0, loss_weight=1), init_cfg: OptMultiConfig = None) -> None: assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super().__init__(init_cfg=init_cfg) self.num_classes = num_classes self.in_channels = in_channels self.corner_emb_channels = corner_emb_channels self.with_corner_emb = self.corner_emb_channels > 0 self.corner_offset_channels = 2 self.num_feat_levels = num_feat_levels self.loss_heatmap = MODELS.build( loss_heatmap) if loss_heatmap is not None else None self.loss_embedding = MODELS.build( loss_embedding) if loss_embedding is not None else None self.loss_offset = MODELS.build( loss_offset) if loss_offset is not None else None self.train_cfg = train_cfg self.test_cfg = test_cfg self._init_layers() def _make_layers(self, out_channels: int, in_channels: int = 256, feat_channels: int = 256) -> nn.Sequential: """Initialize conv sequential for CornerHead.""" return nn.Sequential( ConvModule(in_channels, feat_channels, 3, padding=1), ConvModule( feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None)) def _init_corner_kpt_layers(self) -> None: """Initialize corner keypoint layers. Including corner heatmap branch and corner offset branch. Each branch has two parts: prefix `tl_` for top-left and `br_` for bottom-right. """ self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList() self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList() self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_pool.append( BiCornerPool( self.in_channels, ['top', 'left'], out_channels=self.in_channels)) self.br_pool.append( BiCornerPool( self.in_channels, ['bottom', 'right'], out_channels=self.in_channels)) self.tl_heat.append( self._make_layers( out_channels=self.num_classes, in_channels=self.in_channels)) self.br_heat.append( self._make_layers( out_channels=self.num_classes, in_channels=self.in_channels)) self.tl_off.append( self._make_layers( out_channels=self.corner_offset_channels, in_channels=self.in_channels)) self.br_off.append( self._make_layers( out_channels=self.corner_offset_channels, in_channels=self.in_channels)) def _init_corner_emb_layers(self) -> None: """Initialize corner embedding layers. Only include corner embedding branch with two parts: prefix `tl_` for top-left and `br_` for bottom-right. 
""" self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList() for _ in range(self.num_feat_levels): self.tl_emb.append( self._make_layers( out_channels=self.corner_emb_channels, in_channels=self.in_channels)) self.br_emb.append( self._make_layers( out_channels=self.corner_emb_channels, in_channels=self.in_channels)) def _init_layers(self) -> None: """Initialize layers for CornerHead. Including two parts: corner keypoint layers and corner embedding layers """ self._init_corner_kpt_layers() if self.with_corner_emb: self._init_corner_emb_layers() def init_weights(self) -> None: super().init_weights() bias_init = bias_init_with_prob(0.1) for i in range(self.num_feat_levels): # The initialization of parameters are different between # nn.Conv2d and ConvModule. Our experiments show that # using the original initialization of nn.Conv2d increases # the final mAP by about 0.2% self.tl_heat[i][-1].conv.reset_parameters() self.tl_heat[i][-1].conv.bias.data.fill_(bias_init) self.br_heat[i][-1].conv.reset_parameters() self.br_heat[i][-1].conv.bias.data.fill_(bias_init) self.tl_off[i][-1].conv.reset_parameters() self.br_off[i][-1].conv.reset_parameters() if self.with_corner_emb: self.tl_emb[i][-1].conv.reset_parameters() self.br_emb[i][-1].conv.reset_parameters() def forward(self, feats: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of corner heatmaps, offset heatmaps and embedding heatmaps. - tl_heats (list[Tensor]): Top-left corner heatmaps for all levels, each is a 4D-tensor, the channels number is num_classes. - br_heats (list[Tensor]): Bottom-right corner heatmaps for all levels, each is a 4D-tensor, the channels number is num_classes. - tl_embs (list[Tensor] | list[None]): Top-left embedding heatmaps for all levels, each is a 4D-tensor or None. If not None, the channels number is corner_emb_channels. - br_embs (list[Tensor] | list[None]): Bottom-right embedding heatmaps for all levels, each is a 4D-tensor or None. If not None, the channels number is corner_emb_channels. - tl_offs (list[Tensor]): Top-left offset heatmaps for all levels, each is a 4D-tensor. The channels number is corner_offset_channels. - br_offs (list[Tensor]): Bottom-right offset heatmaps for all levels, each is a 4D-tensor. The channels number is corner_offset_channels. """ lvl_ind = list(range(self.num_feat_levels)) return multi_apply(self.forward_single, feats, lvl_ind) def forward_single(self, x: Tensor, lvl_ind: int, return_pool: bool = False) -> List[Tensor]: """Forward feature of a single level. Args: x (Tensor): Feature of a single level. lvl_ind (int): Level index of current feature. return_pool (bool): Return corner pool feature or not. Defaults to False. Returns: tuple[Tensor]: A tuple of CornerHead's output for current feature level. Containing the following Tensors: - tl_heat (Tensor): Predicted top-left corner heatmap. - br_heat (Tensor): Predicted bottom-right corner heatmap. - tl_emb (Tensor | None): Predicted top-left embedding heatmap. None for `self.with_corner_emb == False`. - br_emb (Tensor | None): Predicted bottom-right embedding heatmap. None for `self.with_corner_emb == False`. - tl_off (Tensor): Predicted top-left offset heatmap. - br_off (Tensor): Predicted bottom-right offset heatmap. - tl_pool (Tensor): Top-left corner pool feature. Not must have. - br_pool (Tensor): Bottom-right corner pool feature. Not must have. 
""" tl_pool = self.tl_pool[lvl_ind](x) tl_heat = self.tl_heat[lvl_ind](tl_pool) br_pool = self.br_pool[lvl_ind](x) br_heat = self.br_heat[lvl_ind](br_pool) tl_emb, br_emb = None, None if self.with_corner_emb: tl_emb = self.tl_emb[lvl_ind](tl_pool) br_emb = self.br_emb[lvl_ind](br_pool) tl_off = self.tl_off[lvl_ind](tl_pool) br_off = self.br_off[lvl_ind](br_pool) result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off] if return_pool: result_list.append(tl_pool) result_list.append(br_pool) return result_list def get_targets(self, gt_bboxes: List[Tensor], gt_labels: List[Tensor], feat_shape: Sequence[int], img_shape: Sequence[int], with_corner_emb: bool = False, with_guiding_shift: bool = False, with_centripetal_shift: bool = False) -> dict: """Generate corner targets. Including corner heatmap, corner offset. Optional: corner embedding, corner guiding shift, centripetal shift. For CornerNet, we generate corner heatmap, corner offset and corner embedding from this function. For CentripetalNet, we generate corner heatmap, corner offset, guiding shift and centripetal shift from this function. Args: gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each has shape (num_gt, 4). gt_labels (list[Tensor]): Ground truth labels of each box, each has shape (num_gt, ). feat_shape (Sequence[int]): Shape of output feature, [batch, channel, height, width]. img_shape (Sequence[int]): Shape of input image, [height, width, channel]. with_corner_emb (bool): Generate corner embedding target or not. Defaults to False. with_guiding_shift (bool): Generate guiding shift target or not. Defaults to False. with_centripetal_shift (bool): Generate centripetal shift target or not. Defaults to False. Returns: dict: Ground truth of corner heatmap, corner offset, corner embedding, guiding shift and centripetal shift. Containing the following keys: - topleft_heatmap (Tensor): Ground truth top-left corner heatmap. - bottomright_heatmap (Tensor): Ground truth bottom-right corner heatmap. - topleft_offset (Tensor): Ground truth top-left corner offset. - bottomright_offset (Tensor): Ground truth bottom-right corner offset. - corner_embedding (list[list[list[int]]]): Ground truth corner embedding. Not must have. - topleft_guiding_shift (Tensor): Ground truth top-left corner guiding shift. Not must have. - bottomright_guiding_shift (Tensor): Ground truth bottom-right corner guiding shift. Not must have. - topleft_centripetal_shift (Tensor): Ground truth top-left corner centripetal shift. Not must have. - bottomright_centripetal_shift (Tensor): Ground truth bottom-right corner centripetal shift. Not must have. """ batch_size, _, height, width = feat_shape img_h, img_w = img_shape[:2] width_ratio = float(width / img_w) height_ratio = float(height / img_h) gt_tl_heatmap = gt_bboxes[-1].new_zeros( [batch_size, self.num_classes, height, width]) gt_br_heatmap = gt_bboxes[-1].new_zeros( [batch_size, self.num_classes, height, width]) gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) if with_corner_emb: match = [] # Guiding shift is a kind of offset, from center to corner if with_guiding_shift: gt_tl_guiding_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) gt_br_guiding_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) # Centripetal shift is also a kind of offset, from center to corner # and normalized by log. 
if with_centripetal_shift: gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) gt_br_centripetal_shift = gt_bboxes[-1].new_zeros( [batch_size, 2, height, width]) for batch_id in range(batch_size): # Ground truth of corner embedding per image is a list of coord set corner_match = [] for box_id in range(len(gt_labels[batch_id])): left, top, right, bottom = gt_bboxes[batch_id][box_id] center_x = (left + right) / 2.0 center_y = (top + bottom) / 2.0 label = gt_labels[batch_id][box_id] # Use coords in the feature level to generate ground truth scale_left = left * width_ratio scale_right = right * width_ratio scale_top = top * height_ratio scale_bottom = bottom * height_ratio scale_center_x = center_x * width_ratio scale_center_y = center_y * height_ratio # Int coords on feature map/ground truth tensor left_idx = int(min(scale_left, width - 1)) right_idx = int(min(scale_right, width - 1)) top_idx = int(min(scale_top, height - 1)) bottom_idx = int(min(scale_bottom, height - 1)) # Generate gaussian heatmap scale_box_width = ceil(scale_right - scale_left) scale_box_height = ceil(scale_bottom - scale_top) radius = gaussian_radius((scale_box_height, scale_box_width), min_overlap=0.3) radius = max(0, int(radius)) gt_tl_heatmap[batch_id, label] = gen_gaussian_target( gt_tl_heatmap[batch_id, label], [left_idx, top_idx], radius) gt_br_heatmap[batch_id, label] = gen_gaussian_target( gt_br_heatmap[batch_id, label], [right_idx, bottom_idx], radius) # Generate corner offset left_offset = scale_left - left_idx top_offset = scale_top - top_idx right_offset = scale_right - right_idx bottom_offset = scale_bottom - bottom_idx gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset gt_br_offset[batch_id, 1, bottom_idx, right_idx] = bottom_offset # Generate corner embedding if with_corner_emb: corner_match.append([[top_idx, left_idx], [bottom_idx, right_idx]]) # Generate guiding shift if with_guiding_shift: gt_tl_guiding_shift[batch_id, 0, top_idx, left_idx] = scale_center_x - left_idx gt_tl_guiding_shift[batch_id, 1, top_idx, left_idx] = scale_center_y - top_idx gt_br_guiding_shift[batch_id, 0, bottom_idx, right_idx] = right_idx - scale_center_x gt_br_guiding_shift[ batch_id, 1, bottom_idx, right_idx] = bottom_idx - scale_center_y # Generate centripetal shift if with_centripetal_shift: gt_tl_centripetal_shift[batch_id, 0, top_idx, left_idx] = log(scale_center_x - scale_left) gt_tl_centripetal_shift[batch_id, 1, top_idx, left_idx] = log(scale_center_y - scale_top) gt_br_centripetal_shift[batch_id, 0, bottom_idx, right_idx] = log(scale_right - scale_center_x) gt_br_centripetal_shift[batch_id, 1, bottom_idx, right_idx] = log(scale_bottom - scale_center_y) if with_corner_emb: match.append(corner_match) target_result = dict( topleft_heatmap=gt_tl_heatmap, topleft_offset=gt_tl_offset, bottomright_heatmap=gt_br_heatmap, bottomright_offset=gt_br_offset) if with_corner_emb: target_result.update(corner_embedding=match) if with_guiding_shift: target_result.update( topleft_guiding_shift=gt_tl_guiding_shift, bottomright_guiding_shift=gt_br_guiding_shift) if with_centripetal_shift: target_result.update( topleft_centripetal_shift=gt_tl_centripetal_shift, bottomright_centripetal_shift=gt_br_centripetal_shift) return target_result def loss_by_feat( self, tl_heats: List[Tensor], br_heats: List[Tensor], tl_embs: List[Tensor], br_embs: List[Tensor], tl_offs: List[Tensor], br_offs: 
List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. Containing the following losses: - det_loss (list[Tensor]): Corner keypoint losses of all feature levels. - pull_loss (list[Tensor]): Part one of AssociativeEmbedding losses of all feature levels. - push_loss (list[Tensor]): Part two of AssociativeEmbedding losses of all feature levels. - off_loss (list[Tensor]): Corner offset losses of all feature levels. """ gt_bboxes = [ gt_instances.bboxes for gt_instances in batch_gt_instances ] gt_labels = [ gt_instances.labels for gt_instances in batch_gt_instances ] targets = self.get_targets( gt_bboxes, gt_labels, tl_heats[-1].shape, batch_img_metas[0]['batch_input_shape'], with_corner_emb=self.with_corner_emb) mlvl_targets = [targets for _ in range(self.num_feat_levels)] det_losses, pull_losses, push_losses, off_losses = multi_apply( self.loss_by_feat_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, mlvl_targets) loss_dict = dict(det_loss=det_losses, off_loss=off_losses) if self.with_corner_emb: loss_dict.update(pull_loss=pull_losses, push_loss=push_losses) return loss_dict def loss_by_feat_single(self, tl_hmp: Tensor, br_hmp: Tensor, tl_emb: Optional[Tensor], br_emb: Optional[Tensor], tl_off: Tensor, br_off: Tensor, targets: dict) -> Tuple[Tensor, ...]: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: tl_hmp (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_hmp (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_emb (Tensor, optional): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor, optional): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). targets (dict): Corner target generated by `get_targets`. Returns: tuple[torch.Tensor]: Losses of the head's different branches containing the following losses: - det_loss (Tensor): Corner keypoint loss. - pull_loss (Tensor): Part one of AssociativeEmbedding loss. 
- push_loss (Tensor): Part two of AssociativeEmbedding loss. - off_loss (Tensor): Corner offset loss. """ gt_tl_hmp = targets['topleft_heatmap'] gt_br_hmp = targets['bottomright_heatmap'] gt_tl_off = targets['topleft_offset'] gt_br_off = targets['bottomright_offset'] gt_embedding = targets['corner_embedding'] # Detection loss tl_det_loss = self.loss_heatmap( tl_hmp.sigmoid(), gt_tl_hmp, avg_factor=max(1, gt_tl_hmp.eq(1).sum())) br_det_loss = self.loss_heatmap( br_hmp.sigmoid(), gt_br_hmp, avg_factor=max(1, gt_br_hmp.eq(1).sum())) det_loss = (tl_det_loss + br_det_loss) / 2.0 # AssociativeEmbedding loss if self.with_corner_emb and self.loss_embedding is not None: pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb, gt_embedding) else: pull_loss, push_loss = None, None # Offset loss # We only compute the offset loss at the real corner position. # The value of real corner would be 1 in heatmap ground truth. # The mask is computed in class agnostic mode and its shape is # batch * 1 * width * height. tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_tl_hmp) br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( gt_br_hmp) tl_off_loss = self.loss_offset( tl_off, gt_tl_off, tl_off_mask, avg_factor=max(1, tl_off_mask.sum())) br_off_loss = self.loss_offset( br_off, gt_br_off, br_off_mask, avg_factor=max(1, br_off_mask.sum())) off_loss = (tl_off_loss + br_off_loss) / 2.0 return det_loss, pull_loss, push_loss, off_loss def predict_by_feat(self, tl_heats: List[Tensor], br_heats: List[Tensor], tl_embs: List[Tensor], br_embs: List[Tensor], tl_offs: List[Tensor], br_offs: List[Tensor], batch_img_metas: Optional[List[dict]] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Args: tl_heats (list[Tensor]): Top-left corner heatmaps for each level with shape (N, num_classes, H, W). br_heats (list[Tensor]): Bottom-right corner heatmaps for each level with shape (N, num_classes, H, W). tl_embs (list[Tensor]): Top-left corner embeddings for each level with shape (N, corner_emb_channels, H, W). br_embs (list[Tensor]): Bottom-right corner embeddings for each level with shape (N, corner_emb_channels, H, W). tl_offs (list[Tensor]): Top-left corner offsets for each level with shape (N, corner_offset_channels, H, W). br_offs (list[Tensor]): Bottom-right corner offsets for each level with shape (N, corner_offset_channels, H, W). batch_img_metas (list[dict], optional): Batch image meta info. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len( batch_img_metas) result_list = [] for img_id in range(len(batch_img_metas)): result_list.append( self._predict_by_feat_single( tl_heats[-1][img_id:img_id + 1, :], br_heats[-1][img_id:img_id + 1, :], tl_offs[-1][img_id:img_id + 1, :], br_offs[-1][img_id:img_id + 1, :], batch_img_metas[img_id], tl_emb=tl_embs[-1][img_id:img_id + 1, :], br_emb=br_embs[-1][img_id:img_id + 1, :], rescale=rescale, with_nms=with_nms)) return result_list def _predict_by_feat_single(self, tl_heat: Tensor, br_heat: Tensor, tl_off: Tensor, br_off: Tensor, img_meta: dict, tl_emb: Optional[Tensor] = None, br_emb: Optional[Tensor] = None, tl_centripetal_shift: Optional[Tensor] = None, br_centripetal_shift: Optional[Tensor] = None, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: tl_heat (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_heat (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. tl_emb (Tensor): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_centripetal_shift: Top-left corner's centripetal shift for current level with shape (N, 2, H, W). br_centripetal_shift: Bottom-right corner's centripetal shift for current level with shape (N, 2, H, W). rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). 
""" if isinstance(img_meta, (list, tuple)): img_meta = img_meta[0] batch_bboxes, batch_scores, batch_clses = self._decode_heatmap( tl_heat=tl_heat.sigmoid(), br_heat=br_heat.sigmoid(), tl_off=tl_off, br_off=br_off, tl_emb=tl_emb, br_emb=br_emb, tl_centripetal_shift=tl_centripetal_shift, br_centripetal_shift=br_centripetal_shift, img_meta=img_meta, k=self.test_cfg.corner_topk, kernel=self.test_cfg.local_maximum_kernel, distance_threshold=self.test_cfg.distance_threshold) if rescale and 'scale_factor' in img_meta: batch_bboxes /= batch_bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) bboxes = batch_bboxes.view([-1, 4]) scores = batch_scores.view(-1) clses = batch_clses.view(-1) det_bboxes = torch.cat([bboxes, scores.unsqueeze(-1)], -1) keepinds = (det_bboxes[:, -1] > -0.1) det_bboxes = det_bboxes[keepinds] det_labels = clses[keepinds] if with_nms: det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels, self.test_cfg) results = InstanceData() results.bboxes = det_bboxes[..., :4] results.scores = det_bboxes[..., 4] results.labels = det_labels return results def _bboxes_nms(self, bboxes: Tensor, labels: Tensor, cfg: ConfigDict) -> Tuple[Tensor, Tensor]: """bboxes nms.""" if 'nms_cfg' in cfg: warning.warn('nms_cfg in test_cfg will be deprecated. ' 'Please rename it as nms') if 'nms' not in cfg: cfg.nms = cfg.nms_cfg if labels.numel() > 0: max_num = cfg.max_per_img bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1].contiguous(), labels, cfg.nms) if max_num > 0: bboxes = bboxes[:max_num] labels = labels[keep][:max_num] return bboxes, labels def _decode_heatmap(self, tl_heat: Tensor, br_heat: Tensor, tl_off: Tensor, br_off: Tensor, tl_emb: Optional[Tensor] = None, br_emb: Optional[Tensor] = None, tl_centripetal_shift: Optional[Tensor] = None, br_centripetal_shift: Optional[Tensor] = None, img_meta: Optional[dict] = None, k: int = 100, kernel: int = 3, distance_threshold: float = 0.5, num_dets: int = 1000) -> Tuple[Tensor, Tensor, Tensor]: """Transform outputs into detections raw bbox prediction. Args: tl_heat (Tensor): Top-left corner heatmap for current level with shape (N, num_classes, H, W). br_heat (Tensor): Bottom-right corner heatmap for current level with shape (N, num_classes, H, W). tl_off (Tensor): Top-left corner offset for current level with shape (N, corner_offset_channels, H, W). br_off (Tensor): Bottom-right corner offset for current level with shape (N, corner_offset_channels, H, W). tl_emb (Tensor, Optional): Top-left corner embedding for current level with shape (N, corner_emb_channels, H, W). br_emb (Tensor, Optional): Bottom-right corner embedding for current level with shape (N, corner_emb_channels, H, W). tl_centripetal_shift (Tensor, Optional): Top-left centripetal shift for current level with shape (N, 2, H, W). br_centripetal_shift (Tensor, Optional): Bottom-right centripetal shift for current level with shape (N, 2, H, W). img_meta (dict): Meta information of current image, e.g., image size, scaling factor, etc. k (int): Get top k corner keypoints from heatmap. kernel (int): Max pooling kernel for extract local maximum pixels. distance_threshold (float): Distance threshold. Top-left and bottom-right corner keypoints with feature distance less than the threshold will be regarded as keypoints from same object. num_dets (int): Num of raw boxes before doing nms. Returns: tuple[torch.Tensor]: Decoded output of CornerHead, containing the following Tensors: - bboxes (Tensor): Coords of each box. - scores (Tensor): Scores of each box. 
- clses (Tensor): Categories of each box. """ with_embedding = tl_emb is not None and br_emb is not None with_centripetal_shift = ( tl_centripetal_shift is not None and br_centripetal_shift is not None) assert with_embedding + with_centripetal_shift == 1 batch, _, height, width = tl_heat.size() if torch.onnx.is_in_onnx_export(): inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2] else: inp_h, inp_w = img_meta['batch_input_shape'][:2] # perform nms on heatmaps tl_heat = get_local_maximum(tl_heat, kernel=kernel) br_heat = get_local_maximum(br_heat, kernel=kernel) tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap( tl_heat, k=k) br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap( br_heat, k=k) # We use repeat instead of expand here because expand is a # shallow-copy function. Thus it could cause unexpected testing result # sometimes. Using expand will decrease about 10% mAP during testing # compared to repeat. tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k) tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k) br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1) br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1) tl_off = transpose_and_gather_feat(tl_off, tl_inds) tl_off = tl_off.view(batch, k, 1, 2) br_off = transpose_and_gather_feat(br_off, br_inds) br_off = br_off.view(batch, 1, k, 2) tl_xs = tl_xs + tl_off[..., 0] tl_ys = tl_ys + tl_off[..., 1] br_xs = br_xs + br_off[..., 0] br_ys = br_ys + br_off[..., 1] if with_centripetal_shift: tl_centripetal_shift = transpose_and_gather_feat( tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp() br_centripetal_shift = transpose_and_gather_feat( br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp() tl_ctxs = tl_xs + tl_centripetal_shift[..., 0] tl_ctys = tl_ys + tl_centripetal_shift[..., 1] br_ctxs = br_xs - br_centripetal_shift[..., 0] br_ctys = br_ys - br_centripetal_shift[..., 1] # all possible boxes based on top k corners (ignoring class) tl_xs *= (inp_w / width) tl_ys *= (inp_h / height) br_xs *= (inp_w / width) br_ys *= (inp_h / height) if with_centripetal_shift: tl_ctxs *= (inp_w / width) tl_ctys *= (inp_h / height) br_ctxs *= (inp_w / width) br_ctys *= (inp_h / height) x_off, y_off = 0, 0 # no crop if not torch.onnx.is_in_onnx_export(): # since `RandomCenterCropPad` is done on CPU with numpy and it's # not dynamic traceable when exporting to ONNX, thus 'border' # does not appears as key in 'img_meta'. As a tmp solution, # we move this 'border' handle part to the postprocess after # finished exporting to ONNX, which is handle in # `mmdet/core/export/model_wrappers.py`. Though difference between # pytorch and exported onnx model, it might be ignored since # comparable performance is achieved between them (e.g. 
40.4 vs # 40.6 on COCO val2017, for CornerNet without test-time flip) if 'border' in img_meta: x_off = img_meta['border'][2] y_off = img_meta['border'][0] tl_xs -= x_off tl_ys -= y_off br_xs -= x_off br_ys -= y_off zeros = tl_xs.new_zeros(*tl_xs.size()) tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros) tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros) br_xs = torch.where(br_xs > 0.0, br_xs, zeros) br_ys = torch.where(br_ys > 0.0, br_ys, zeros) bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3) area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs() if with_centripetal_shift: tl_ctxs -= x_off tl_ctys -= y_off br_ctxs -= x_off br_ctys -= y_off tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs) tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys) br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs) br_ctys *= br_ctys.gt(0.0).type_as(br_ctys) ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys), dim=3) area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs() rcentral = torch.zeros_like(ct_bboxes) # magic nums from paper section 4.1 mu = torch.ones_like(area_bboxes) / 2.4 mu[area_bboxes > 3500] = 1 / 2.1 # large bbox have smaller mu bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2 bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2 rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] - bboxes[..., 0]) / 2 rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] - bboxes[..., 1]) / 2 rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] - bboxes[..., 0]) / 2 rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] - bboxes[..., 1]) / 2 area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) * (rcentral[..., 3] - rcentral[..., 1])).abs() dists = area_ct_bboxes / area_rcentral tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | ( ct_bboxes[..., 0] >= rcentral[..., 2]) tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | ( ct_bboxes[..., 1] >= rcentral[..., 3]) br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | ( ct_bboxes[..., 2] >= rcentral[..., 2]) br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | ( ct_bboxes[..., 3] >= rcentral[..., 3]) if with_embedding: tl_emb = transpose_and_gather_feat(tl_emb, tl_inds) tl_emb = tl_emb.view(batch, k, 1) br_emb = transpose_and_gather_feat(br_emb, br_inds) br_emb = br_emb.view(batch, 1, k) dists = torch.abs(tl_emb - br_emb) tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k) br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1) scores = (tl_scores + br_scores) / 2 # scores for all possible boxes # tl and br should have same class tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k) br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1) cls_inds = (tl_clses != br_clses) # reject boxes based on distances dist_inds = dists > distance_threshold # reject boxes based on widths and heights width_inds = (br_xs <= tl_xs) height_inds = (br_ys <= tl_ys) # No use `scores[cls_inds]`, instead we use `torch.where` here. # Since only 1-D indices with type 'tensor(bool)' are supported # when exporting to ONNX, any other bool indices with more dimensions # (e.g. 
2-D bool tensor) as input parameter in node is invalid negative_scores = -1 * torch.ones_like(scores) scores = torch.where(cls_inds, negative_scores, scores) scores = torch.where(width_inds, negative_scores, scores) scores = torch.where(height_inds, negative_scores, scores) scores = torch.where(dist_inds, negative_scores, scores) if with_centripetal_shift: scores[tl_ctx_inds] = -1 scores[tl_cty_inds] = -1 scores[br_ctx_inds] = -1 scores[br_cty_inds] = -1 scores = scores.view(batch, -1) scores, inds = torch.topk(scores, num_dets) scores = scores.unsqueeze(2) bboxes = bboxes.view(batch, -1, 4) bboxes = gather_feat(bboxes, inds) clses = tl_clses.contiguous().view(batch, -1, 1) clses = gather_feat(clses, inds) return bboxes, scores, clses
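

# Illustrative usage sketch (not prescriptive): in the CornerNet setup the
# backbone returns ``num_feat_levels`` feature maps of identical shape, which
# are passed to the head as a sequence, e.g.
#
#     head = CornerHead(num_classes=80, in_channels=256)
#     feats = [torch.rand(1, 256, 128, 128) for _ in range(2)]
#     tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs = head(feats)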
49,393
44.524424
79
py
ERD
ERD-main/mmdet/models/dense_heads/yolact_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from typing import List, Optional import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmengine.model import BaseModule, ModuleList from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, OptMultiConfig) from ..layers import fast_nms from ..utils import images_to_levels, multi_apply, select_single_mlvl from ..utils.misc import empty_instances from .anchor_head import AnchorHead from .base_mask_head import BaseMaskHead @MODELS.register_module() class YOLACTHead(AnchorHead): """YOLACT box head used in https://arxiv.org/abs/1904.02689. Note that YOLACT head is a light version of RetinaNet head. Four differences are described as follows: 1. YOLACT box head has three-times fewer anchors. 2. YOLACT box head shares the convs for box and cls branches. 3. YOLACT box head uses OHEM instead of Focal loss. 4. YOLACT box head predicts a set of mask coefficients for each box. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. anchor_generator (:obj:`ConfigDict` or dict): Config dict for anchor generator loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. num_head_convs (int): Number of the conv layers shared by box and cls branches. num_protos (int): Number of the mask coefficients. use_ohem (bool): If true, ``loss_single_OHEM`` will be used for cls loss calculation. If false, ``loss_single`` will be used. conv_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to construct and config conv layer. norm_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to construct and config norm layer. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or list[dict], optional): Initialization config dict. 
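
    Example (illustrative; feature sizes are arbitrary and only meant to show
    the structure of the three output branches):

        >>> import torch
        >>> self = YOLACTHead(num_classes=80, in_channels=256)
        >>> feats = [torch.rand(1, 256, s, s) for s in (69, 35, 18, 9, 5)]
        >>> cls_scores, bbox_preds, coeff_preds = self.forward(feats)
        >>> len(cls_scores) == len(bbox_preds) == len(coeff_preds) == 5
        True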
""" def __init__(self, num_classes: int, in_channels: int, anchor_generator: ConfigType = dict( type='AnchorGenerator', octave_base_scale=3, scales_per_octave=1, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=False, reduction='none', loss_weight=1.0), loss_bbox: ConfigType = dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.5), num_head_convs: int = 1, num_protos: int = 32, use_ohem: bool = True, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, init_cfg: OptMultiConfig = dict( type='Xavier', distribution='uniform', bias=0, layer='Conv2d'), **kwargs) -> None: self.num_head_convs = num_head_convs self.num_protos = num_protos self.use_ohem = use_ohem self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super().__init__( num_classes=num_classes, in_channels=in_channels, loss_cls=loss_cls, loss_bbox=loss_bbox, anchor_generator=anchor_generator, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.head_convs = ModuleList() for i in range(self.num_head_convs): chn = self.in_channels if i == 0 else self.feat_channels self.head_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.conv_coeff = nn.Conv2d( self.feat_channels, self.num_base_priors * self.num_protos, 3, padding=1) def forward_single(self, x: Tensor) -> tuple: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: - cls_score (Tensor): Cls scores for a single scale level the channels number is num_anchors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale level, the channels number is num_anchors * 4. - coeff_pred (Tensor): Mask coefficients for a single scale level, the channels number is num_anchors * num_protos. """ for head_conv in self.head_convs: x = head_conv(x) cls_score = self.conv_cls(x) bbox_pred = self.conv_reg(x) coeff_pred = self.conv_coeff(x).tanh() return cls_score, bbox_pred, coeff_pred def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], coeff_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the bbox head. When ``self.use_ohem == True``, it functions like ``SSDHead.loss``, otherwise, it follows ``AnchorHead.loss``. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). coeff_preds (list[Tensor]): Mask coefficients for each scale level with shape (N, num_anchors * num_protos, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, unmap_outputs=not self.use_ohem, return_sampling_results=True) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor, sampling_results) = cls_reg_targets if self.use_ohem: num_images = len(batch_img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) # concat all level anchors to a single tensor all_anchors = [] for i in range(num_images): all_anchors.append(torch.cat(anchor_list[i])) # check NaN and Inf assert torch.isfinite(all_cls_scores).all().item(), \ 'classification scores become infinite or NaN!' assert torch.isfinite(all_bbox_preds).all().item(), \ 'bbox predications become infinite or NaN!' losses_cls, losses_bbox = multi_apply( self.OHEMloss_by_feat_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, avg_factor=avg_factor) else: # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_by_feat_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor=avg_factor) losses = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) # update `_raw_positive_infos`, which will be used when calling # `get_positive_infos`. self._raw_positive_infos.update(coeff_preds=coeff_preds) return losses def OHEMloss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, anchors: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, avg_factor: int) -> tuple: """Compute loss of a single image. Similar to func:``SSDHead.loss_by_feat_single`` Args: cls_score (Tensor): Box scores for eachimage Has shape (num_total_anchors, num_classes). bbox_pred (Tensor): Box energies / deltas for each image level with shape (num_total_anchors, 4). anchors (Tensor): Box reference for each scale level with shape (num_total_anchors, 4). labels (Tensor): Labels of each anchors with shape (num_total_anchors,). label_weights (Tensor): Label weights of each anchor with shape (num_total_anchors,) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (num_total_anchors, 4). bbox_weights (Tensor): BBox regression loss weights of each anchor with shape (num_total_anchors, 4). avg_factor (int): Average factor that is used to average the loss. 
When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: Tuple[Tensor, Tensor]: A tuple of cls loss and bbox loss of one feature map. """ loss_cls_all = self.loss_cls(cls_score, labels, label_weights) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( as_tuple=False).reshape(-1) neg_inds = (labels == self.num_classes).nonzero( as_tuple=False).view(-1) num_pos_samples = pos_inds.size(0) if num_pos_samples == 0: num_neg_samples = neg_inds.size(0) else: num_neg_samples = self.train_cfg['neg_pos_ratio'] * \ num_pos_samples if num_neg_samples > neg_inds.size(0): num_neg_samples = neg_inds.size(0) topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) loss_cls_pos = loss_cls_all[pos_inds].sum() loss_cls_neg = topk_loss_cls_neg.sum() loss_cls = (loss_cls_pos + loss_cls_neg) / avg_factor if self.reg_decoded_bbox: # When the regression loss (e.g. `IouLoss`, `GIouLoss`) # is applied directly on the decoded bounding boxes, it # decodes the already encoded coordinates to absolute format. bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) loss_bbox = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor) return loss_cls[None], loss_bbox def get_positive_infos(self) -> InstanceList: """Get positive information from sampling results. Returns: list[:obj:`InstanceData`]: Positive Information of each image, usually including positive bboxes, positive labels, positive priors, positive coeffs, etc. """ assert len(self._raw_positive_infos) > 0 sampling_results = self._raw_positive_infos['sampling_results'] num_imgs = len(sampling_results) coeff_pred_list = [] for coeff_pred_per_level in self._raw_positive_infos['coeff_preds']: coeff_pred_per_level = \ coeff_pred_per_level.permute( 0, 2, 3, 1).reshape(num_imgs, -1, self.num_protos) coeff_pred_list.append(coeff_pred_per_level) coeff_preds = torch.cat(coeff_pred_list, dim=1) pos_info_list = [] for idx, sampling_result in enumerate(sampling_results): pos_info = InstanceData() coeff_preds_single = coeff_preds[idx] pos_info.pos_assigned_gt_inds = \ sampling_result.pos_assigned_gt_inds pos_info.pos_inds = sampling_result.pos_inds pos_info.coeffs = coeff_preds_single[sampling_result.pos_inds] pos_info.bboxes = sampling_result.pos_gt_bboxes pos_info_list.append(pos_info) return pos_info_list def predict_by_feat(self, cls_scores, bbox_preds, coeff_preds, batch_img_metas, cfg=None, rescale=True, **kwargs): """Similar to func:``AnchorHead.get_bboxes``, but additionally processes coeff_preds. Args: cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) coeff_preds (list[Tensor]): Mask coefficients for each scale level with shape (N, num_anchors * num_protos, H, W) batch_img_metas (list[dict]): Batch image meta info. cfg (:obj:`Config` | None): Test / postprocessing configuration, if None, test_cfg would be used rescale (bool): If True, return boxes in original image space. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). 
- bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - coeffs (Tensor): the predicted mask coefficients of instance inside the corresponding box has a shape (n, num_protos). """ assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) device = cls_scores[0].device featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, device=device) result_list = [] for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] cls_score_list = select_single_mlvl(cls_scores, img_id) bbox_pred_list = select_single_mlvl(bbox_preds, img_id) coeff_pred_list = select_single_mlvl(coeff_preds, img_id) results = self._predict_by_feat_single( cls_score_list=cls_score_list, bbox_pred_list=bbox_pred_list, coeff_preds_list=coeff_pred_list, mlvl_priors=mlvl_priors, img_meta=img_meta, cfg=cfg, rescale=rescale) result_list.append(results) return result_list def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], coeff_preds_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: ConfigType, rescale: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Similar to func:``AnchorHead._predict_by_feat_single``, but additionally processes coeff_preds_list and uses fast NMS instead of traditional NMS. Args: cls_score_list (list[Tensor]): Box scores for a single scale level Has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas for a single scale level with shape (num_priors * 4, H, W). coeff_preds_list (list[Tensor]): Mask coefficients for a single scale level with shape (num_priors * num_protos, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - coeffs (Tensor): the predicted mask coefficients of instance inside the corresponding box has a shape (n, num_protos). """ assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_priors) cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bbox_preds = [] mlvl_valid_priors = [] mlvl_scores = [] mlvl_coeffs = [] for cls_score, bbox_pred, coeff_pred, priors in \ zip(cls_score_list, bbox_pred_list, coeff_preds_list, mlvl_priors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) coeff_pred = coeff_pred.permute(1, 2, 0).reshape(-1, self.num_protos) if 0 < nms_pre < scores.shape[0]: # Get maximum scores for foreground classes. 
if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = scores[:, :-1].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) priors = priors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] coeff_pred = coeff_pred[topk_inds, :] mlvl_bbox_preds.append(bbox_pred) mlvl_valid_priors.append(priors) mlvl_scores.append(scores) mlvl_coeffs.append(coeff_pred) bbox_pred = torch.cat(mlvl_bbox_preds) priors = torch.cat(mlvl_valid_priors) multi_bboxes = self.bbox_coder.decode( priors, bbox_pred, max_shape=img_shape) multi_scores = torch.cat(mlvl_scores) multi_coeffs = torch.cat(mlvl_coeffs) return self._bbox_post_process( multi_bboxes=multi_bboxes, multi_scores=multi_scores, multi_coeffs=multi_coeffs, cfg=cfg, rescale=rescale, img_meta=img_meta) def _bbox_post_process(self, multi_bboxes: Tensor, multi_scores: Tensor, multi_coeffs: Tensor, cfg: ConfigType, rescale: bool = False, img_meta: Optional[dict] = None, **kwargs) -> InstanceData: """bbox post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Usually `with_nms` is False is used for aug test. Args: multi_bboxes (Tensor): Predicted bbox that concat all levels. multi_scores (Tensor): Bbox scores that concat all levels. multi_coeffs (Tensor): Mask coefficients that concat all levels. cfg (ConfigDict): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default to False. img_meta (dict, optional): Image meta info. Defaults to None. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - coeffs (Tensor): the predicted mask coefficients of instance inside the corresponding box has a shape (n, num_protos). """ if rescale: assert img_meta.get('scale_factor') is not None multi_bboxes /= multi_bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) # mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) if self.use_sigmoid_cls: # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = multi_scores.new_zeros(multi_scores.shape[0], 1) multi_scores = torch.cat([multi_scores, padding], dim=1) det_bboxes, det_labels, det_coeffs = fast_nms( multi_bboxes, multi_scores, multi_coeffs, cfg.score_thr, cfg.iou_thr, cfg.top_k, cfg.max_per_img) results = InstanceData() results.bboxes = det_bboxes[:, :4] results.scores = det_bboxes[:, -1] results.labels = det_labels results.coeffs = det_coeffs return results @MODELS.register_module() class YOLACTProtonet(BaseMaskHead): """YOLACT mask head used in https://arxiv.org/abs/1904.02689. This head outputs the mask prototypes for YOLACT. Args: in_channels (int): Number of channels in the input feature map. proto_channels (tuple[int]): Output channels of protonet convs. proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs. include_last_relu (bool): If keep the last relu of protonet. num_protos (int): Number of prototypes. num_classes (int): Number of categories excluding the background category. 
loss_mask_weight (float): Reweight the mask loss by this factor. max_masks_to_train (int): Maximum number of masks to train for each image. with_seg_branch (bool): Whether to apply a semantic segmentation branch and calculate loss during training to increase performance with no speed penalty. Defaults to True. loss_segm (:obj:`ConfigDict` or dict, optional): Config of semantic segmentation loss. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of head. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of head. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or list[dict], optional): Initialization config dict. """ def __init__( self, num_classes: int, in_channels: int = 256, proto_channels: tuple = (256, 256, 256, None, 256, 32), proto_kernel_sizes: tuple = (3, 3, 3, -2, 3, 1), include_last_relu: bool = True, num_protos: int = 32, loss_mask_weight: float = 1.0, max_masks_to_train: int = 100, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, with_seg_branch: bool = True, loss_segm: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), init_cfg=dict( type='Xavier', distribution='uniform', override=dict(name='protonet')) ) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.proto_channels = proto_channels self.proto_kernel_sizes = proto_kernel_sizes self.include_last_relu = include_last_relu # Segmentation branch self.with_seg_branch = with_seg_branch self.segm_branch = SegmentationModule( num_classes=num_classes, in_channels=in_channels) \ if with_seg_branch else None self.loss_segm = MODELS.build(loss_segm) if with_seg_branch else None self.loss_mask_weight = loss_mask_weight self.num_protos = num_protos self.num_classes = num_classes self.max_masks_to_train = max_masks_to_train self.train_cfg = train_cfg self.test_cfg = test_cfg self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" # Possible patterns: # ( 256, 3) -> conv # ( 256,-2) -> deconv # (None,-2) -> bilinear interpolate in_channels = self.in_channels protonets = ModuleList() for num_channels, kernel_size in zip(self.proto_channels, self.proto_kernel_sizes): if kernel_size > 0: layer = nn.Conv2d( in_channels, num_channels, kernel_size, padding=kernel_size // 2) else: if num_channels is None: layer = InterpolateModule( scale_factor=-kernel_size, mode='bilinear', align_corners=False) else: layer = nn.ConvTranspose2d( in_channels, num_channels, -kernel_size, padding=kernel_size // 2) protonets.append(layer) protonets.append(nn.ReLU(inplace=True)) in_channels = num_channels if num_channels is not None \ else in_channels if not self.include_last_relu: protonets = protonets[:-1] self.protonet = nn.Sequential(*protonets) def forward(self, x: tuple, positive_infos: InstanceList) -> tuple: """Forward feature from the upstream network to get prototypes and linearly combine the prototypes, using masks coefficients, into instance masks. Finally, crop the instance masks with given bboxes. Args: x (Tuple[Tensor]): Feature from the upstream network, which is a 4D-tensor. positive_infos (List[:obj:``InstanceData``]): Positive information that calculate from detect head. Returns: tuple: Predicted instance segmentation masks and semantic segmentation map. """ # YOLACT used single feature map to get segmentation masks single_x = x[0] # YOLACT segmentation branch, if not training or segmentation branch # is None, will not process the forward function. 
if self.segm_branch is not None and self.training: segm_preds = self.segm_branch(single_x) else: segm_preds = None # YOLACT mask head prototypes = self.protonet(single_x) prototypes = prototypes.permute(0, 2, 3, 1).contiguous() num_imgs = single_x.size(0) mask_pred_list = [] for idx in range(num_imgs): cur_prototypes = prototypes[idx] pos_coeffs = positive_infos[idx].coeffs # Linearly combine the prototypes with the mask coefficients mask_preds = cur_prototypes @ pos_coeffs.t() mask_preds = torch.sigmoid(mask_preds) mask_pred_list.append(mask_preds) return mask_pred_list, segm_preds def loss_by_feat(self, mask_preds: List[Tensor], segm_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], positive_infos: InstanceList, **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mask_preds (list[Tensor]): List of predicted prototypes, each has shape (num_classes, H, W). segm_preds (Tensor): Predicted semantic segmentation map with shape (N, num_classes, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. positive_infos (List[:obj:``InstanceData``]): Information of positive samples of each image that are assigned in detection head. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert positive_infos is not None, \ 'positive_infos should not be None in `YOLACTProtonet`' losses = dict() # crop croped_mask_pred = self.crop_mask_preds(mask_preds, batch_img_metas, positive_infos) loss_mask = [] loss_segm = [] num_imgs, _, mask_h, mask_w = segm_preds.size() assert num_imgs == len(croped_mask_pred) segm_avg_factor = num_imgs * mask_h * mask_w total_pos = 0 if self.segm_branch is not None: assert segm_preds is not None for idx in range(num_imgs): img_meta = batch_img_metas[idx] (mask_preds, pos_mask_targets, segm_targets, num_pos, gt_bboxes_for_reweight) = self._get_targets_single( croped_mask_pred[idx], segm_preds[idx], batch_gt_instances[idx], positive_infos[idx]) # segmentation loss if self.with_seg_branch: if segm_targets is None: loss = segm_preds[idx].sum() * 0. else: loss = self.loss_segm( segm_preds[idx], segm_targets, avg_factor=segm_avg_factor) loss_segm.append(loss) # mask loss total_pos += num_pos if num_pos == 0 or pos_mask_targets is None: loss = mask_preds.sum() * 0. else: mask_preds = torch.clamp(mask_preds, 0, 1) loss = F.binary_cross_entropy( mask_preds, pos_mask_targets, reduction='none') * self.loss_mask_weight h, w = img_meta['img_shape'][:2] gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] - gt_bboxes_for_reweight[:, 0]) / w gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] - gt_bboxes_for_reweight[:, 1]) / h loss = loss.mean(dim=(1, 2)) / gt_bboxes_width / gt_bboxes_height loss = torch.sum(loss) loss_mask.append(loss) if total_pos == 0: total_pos += 1 # avoid nan loss_mask = [x / total_pos for x in loss_mask] losses.update(loss_mask=loss_mask) if self.with_seg_branch: losses.update(loss_segm=loss_segm) return losses def _get_targets_single(self, mask_preds: Tensor, segm_pred: Tensor, gt_instances: InstanceData, positive_info: InstanceData): """Compute targets for predictions of single image. Args: mask_preds (Tensor): Predicted prototypes with shape (num_classes, H, W). segm_pred (Tensor): Predicted semantic segmentation map with shape (num_classes, H, W). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. 
It should includes ``bboxes``, ``labels``, and ``masks`` attributes. positive_info (:obj:`InstanceData`): Information of positive samples that are assigned in detection head. It usually contains following keys. - pos_assigned_gt_inds (Tensor): Assigner GT indexes of positive proposals, has shape (num_pos, ) - pos_inds (Tensor): Positive index of image, has shape (num_pos, ). - coeffs (Tensor): Positive mask coefficients with shape (num_pos, num_protos). - bboxes (Tensor): Positive bboxes with shape (num_pos, 4) Returns: tuple: Usually returns a tuple containing learning targets. - mask_preds (Tensor): Positive predicted mask with shape (num_pos, mask_h, mask_w). - pos_mask_targets (Tensor): Positive mask targets with shape (num_pos, mask_h, mask_w). - segm_targets (Tensor): Semantic segmentation targets with shape (num_classes, segm_h, segm_w). - num_pos (int): Positive numbers. - gt_bboxes_for_reweight (Tensor): GT bboxes that match to the positive priors has shape (num_pos, 4). """ gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels device = gt_bboxes.device gt_masks = gt_instances.masks.to_tensor( dtype=torch.bool, device=device).float() if gt_masks.size(0) == 0: return mask_preds, None, None, 0, None # process with semantic segmentation targets if segm_pred is not None: num_classes, segm_h, segm_w = segm_pred.size() with torch.no_grad(): downsampled_masks = F.interpolate( gt_masks.unsqueeze(0), (segm_h, segm_w), mode='bilinear', align_corners=False).squeeze(0) downsampled_masks = downsampled_masks.gt(0.5).float() segm_targets = torch.zeros_like(segm_pred, requires_grad=False) for obj_idx in range(downsampled_masks.size(0)): segm_targets[gt_labels[obj_idx] - 1] = torch.max( segm_targets[gt_labels[obj_idx] - 1], downsampled_masks[obj_idx]) else: segm_targets = None # process with mask targets pos_assigned_gt_inds = positive_info.pos_assigned_gt_inds num_pos = pos_assigned_gt_inds.size(0) # Since we're producing (near) full image masks, # it'd take too much vram to backprop on every single mask. # Thus we select only a subset. if num_pos > self.max_masks_to_train: perm = torch.randperm(num_pos) select = perm[:self.max_masks_to_train] mask_preds = mask_preds[select] pos_assigned_gt_inds = pos_assigned_gt_inds[select] num_pos = self.max_masks_to_train gt_bboxes_for_reweight = gt_bboxes[pos_assigned_gt_inds] mask_h, mask_w = mask_preds.shape[-2:] gt_masks = F.interpolate( gt_masks.unsqueeze(0), (mask_h, mask_w), mode='bilinear', align_corners=False).squeeze(0) gt_masks = gt_masks.gt(0.5).float() pos_mask_targets = gt_masks[pos_assigned_gt_inds] return (mask_preds, pos_mask_targets, segm_targets, num_pos, gt_bboxes_for_reweight) def crop_mask_preds(self, mask_preds: List[Tensor], batch_img_metas: List[dict], positive_infos: InstanceList) -> list: """Crop predicted masks by zeroing out everything not in the predicted bbox. Args: mask_preds (list[Tensor]): Predicted prototypes with shape (num_classes, H, W). batch_img_metas (list[dict]): Meta information of multiple images. positive_infos (List[:obj:``InstanceData``]): Positive information that calculate from detect head. Returns: list: The cropped masks. 
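
        Example (illustrative shapes only; assumes ``self`` is a constructed
        ``YOLACTProtonet``):

            >>> import torch
            >>> from mmengine.structures import InstanceData
            >>> masks = [torch.rand(138, 138, 3)]  # one image, 3 positives
            >>> metas = [dict(img_shape=(550, 550, 3))]
            >>> pos = InstanceData(
            ...     bboxes=torch.tensor([[0., 0., 275., 275.]] * 3))
            >>> self.crop_mask_preds(masks, metas, [pos])[0].shape
            torch.Size([3, 138, 138])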
""" croped_mask_preds = [] for img_meta, mask_preds, cur_info in zip(batch_img_metas, mask_preds, positive_infos): bboxes_for_cropping = copy.deepcopy(cur_info.bboxes) h, w = img_meta['img_shape'][:2] bboxes_for_cropping[:, 0::2] /= w bboxes_for_cropping[:, 1::2] /= h mask_preds = self.crop_single(mask_preds, bboxes_for_cropping) mask_preds = mask_preds.permute(2, 0, 1).contiguous() croped_mask_preds.append(mask_preds) return croped_mask_preds def crop_single(self, masks: Tensor, boxes: Tensor, padding: int = 1) -> Tensor: """Crop single predicted masks by zeroing out everything not in the predicted bbox. Args: masks (Tensor): Predicted prototypes, has shape [H, W, N]. boxes (Tensor): Bbox coords in relative point form with shape [N, 4]. padding (int): Image padding size. Return: Tensor: The cropped masks. """ h, w, n = masks.size() x1, x2 = self.sanitize_coordinates( boxes[:, 0], boxes[:, 2], w, padding, cast=False) y1, y2 = self.sanitize_coordinates( boxes[:, 1], boxes[:, 3], h, padding, cast=False) rows = torch.arange( w, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(h, w, n) cols = torch.arange( h, device=masks.device, dtype=x1.dtype).view(-1, 1, 1).expand(h, w, n) masks_left = rows >= x1.view(1, 1, -1) masks_right = rows < x2.view(1, 1, -1) masks_up = cols >= y1.view(1, 1, -1) masks_down = cols < y2.view(1, 1, -1) crop_mask = masks_left * masks_right * masks_up * masks_down return masks * crop_mask.float() def sanitize_coordinates(self, x1: Tensor, x2: Tensor, img_size: int, padding: int = 0, cast: bool = True) -> tuple: """Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, and x2 <= image_size. Also converts from relative to absolute coordinates and casts the results to long tensors. Warning: this does things in-place behind the scenes so copy if necessary. Args: x1 (Tensor): shape (N, ). x2 (Tensor): shape (N, ). img_size (int): Size of the input image. padding (int): x1 >= padding, x2 <= image_size-padding. cast (bool): If cast is false, the result won't be cast to longs. Returns: tuple: - x1 (Tensor): Sanitized _x1. - x2 (Tensor): Sanitized _x2. """ x1 = x1 * img_size x2 = x2 * img_size if cast: x1 = x1.long() x2 = x2.long() x1 = torch.min(x1, x2) x2 = torch.max(x1, x2) x1 = torch.clamp(x1 - padding, min=0) x2 = torch.clamp(x2 + padding, max=img_size) return x1, x2 def predict_by_feat(self, mask_preds: List[Tensor], segm_preds: Tensor, results_list: InstanceList, batch_img_metas: List[dict], rescale: bool = True, **kwargs) -> InstanceList: """Transform a batch of output features extracted from the head into mask results. Args: mask_preds (list[Tensor]): Predicted prototypes with shape (num_classes, H, W). results_list (List[:obj:``InstanceData``]): BBoxHead results. batch_img_metas (list[dict]): Meta information of all images. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). 
""" assert len(mask_preds) == len(results_list) == len(batch_img_metas) croped_mask_pred = self.crop_mask_preds(mask_preds, batch_img_metas, results_list) for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] results = results_list[img_id] bboxes = results.bboxes mask_preds = croped_mask_pred[img_id] if bboxes.shape[0] == 0 or mask_preds.shape[0] == 0: results_list[img_id] = empty_instances( [img_meta], bboxes.device, task_type='mask', instance_results=[results])[0] else: im_mask = self._predict_by_feat_single( mask_preds=croped_mask_pred[img_id], bboxes=bboxes, img_meta=img_meta, rescale=rescale) results.masks = im_mask return results_list def _predict_by_feat_single(self, mask_preds: Tensor, bboxes: Tensor, img_meta: dict, rescale: bool, cfg: OptConfigType = None): """Transform a single image's features extracted from the head into mask results. Args: mask_preds (Tensor): Predicted prototypes, has shape [H, W, N]. bboxes (Tensor): Bbox coords in relative point form with shape [N, 4]. img_meta (dict): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If rescale is False, then returned masks will fit the scale of imgs[0]. cfg (dict, optional): Config used in test phase. Defaults to None. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ cfg = self.test_cfg if cfg is None else cfg scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) img_h, img_w = img_meta['ori_shape'][:2] if rescale: # in-placed rescale the bboxes scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes /= scale_factor else: w_scale, h_scale = scale_factor[0, 0], scale_factor[0, 1] img_h = np.round(img_h * h_scale.item()).astype(np.int32) img_w = np.round(img_w * w_scale.item()).astype(np.int32) masks = F.interpolate( mask_preds.unsqueeze(0), (img_h, img_w), mode='bilinear', align_corners=False).squeeze(0) > cfg.mask_thr if cfg.mask_thr_binary < 0: # for visualization and debugging masks = (masks * 255).to(dtype=torch.uint8) return masks class SegmentationModule(BaseModule): """YOLACT segmentation branch used in <https://arxiv.org/abs/1904.02689>`_ In mmdet v2.x `segm_loss` is calculated in YOLACTSegmHead, while in mmdet v3.x `SegmentationModule` is used to obtain the predicted semantic segmentation map and `segm_loss` is calculated in YOLACTProtonet. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__( self, num_classes: int, in_channels: int = 256, init_cfg: ConfigType = dict( type='Xavier', distribution='uniform', override=dict(name='segm_conv')) ) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.num_classes = num_classes self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" self.segm_conv = nn.Conv2d( self.in_channels, self.num_classes, kernel_size=1) def forward(self, x: Tensor) -> Tensor: """Forward feature from the upstream network. Args: x (Tensor): Feature from the upstream network, which is a 4D-tensor. Returns: Tensor: Predicted semantic segmentation map with shape (N, num_classes, H, W). 
""" return self.segm_conv(x) class InterpolateModule(BaseModule): """This is a module version of F.interpolate. Any arguments you give it just get passed along for the ride. """ def __init__(self, *args, init_cfg=None, **kwargs) -> None: super().__init__(init_cfg=init_cfg) self.args = args self.kwargs = kwargs def forward(self, x: Tensor) -> Tensor: """Forward features from the upstream network. Args: x (Tensor): Feature from the upstream network, which is a 4D-tensor. Returns: Tensor: A 4D-tensor feature map. """ return F.interpolate(x, *self.args, **self.kwargs)
50,619
41.39531
79
py
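# Illustrative sketch (not taken from the ERD sources above): the crop step in
# YOLACTProtonet.crop_single can be reproduced with plain torch. Shapes follow
# the docstrings: masks are [H, W, N] and boxes are relative (x1, y1, x2, y2)
# in [0, 1]; names such as `demo_masks` are hypothetical.
import torch


def crop_by_relative_boxes(masks: torch.Tensor, boxes: torch.Tensor,
                           padding: int = 1) -> torch.Tensor:
    """Zero out activations outside each box (one relative box per mask)."""
    h, w, n = masks.size()
    x1 = (boxes[:, 0] * w - padding).clamp(min=0)
    x2 = (boxes[:, 2] * w + padding).clamp(max=w)
    y1 = (boxes[:, 1] * h - padding).clamp(min=0)
    y2 = (boxes[:, 3] * h + padding).clamp(max=h)
    rows = torch.arange(w, dtype=masks.dtype).view(1, -1, 1).expand(h, w, n)
    cols = torch.arange(h, dtype=masks.dtype).view(-1, 1, 1).expand(h, w, n)
    keep = (rows >= x1) & (rows < x2) & (cols >= y1) & (cols < y2)
    return masks * keep.float()


demo_masks = torch.rand(8, 8, 2)                     # [H, W, N]
demo_boxes = torch.tensor([[0.0, 0.0, 0.5, 0.5],
                           [0.25, 0.25, 1.0, 1.0]])  # one relative box per mask
print(crop_by_relative_boxes(demo_masks, demo_boxes).shape)  # torch.Size([8, 8, 2])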
ERD
ERD-main/mmdet/models/dense_heads/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .anchor_free_head import AnchorFreeHead from .anchor_head import AnchorHead from .atss_head import ATSSHead from .autoassign_head import AutoAssignHead from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead from .centernet_head import CenterNetHead from .centernet_update_head import CenterNetUpdateHead from .centripetal_head import CentripetalHead from .condinst_head import CondInstBboxHead, CondInstMaskHead from .conditional_detr_head import ConditionalDETRHead from .corner_head import CornerHead from .dab_detr_head import DABDETRHead from .ddod_head import DDODHead from .deformable_detr_head import DeformableDETRHead from .detr_head import DETRHead from .dino_head import DINOHead from .embedding_rpn_head import EmbeddingRPNHead from .fcos_head import FCOSHead from .fovea_head import FoveaHead from .free_anchor_retina_head import FreeAnchorRetinaHead from .fsaf_head import FSAFHead from .ga_retina_head import GARetinaHead from .ga_rpn_head import GARPNHead from .gfl_head import GFLHead from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead from .lad_head import LADHead from .ld_head import LDHead from .mask2former_head import Mask2FormerHead from .maskformer_head import MaskFormerHead from .nasfcos_head import NASFCOSHead from .paa_head import PAAHead from .pisa_retinanet_head import PISARetinaHead from .pisa_ssd_head import PISASSDHead from .reppoints_head import RepPointsHead from .retina_head import RetinaHead from .retina_sepbn_head import RetinaSepBNHead from .rpn_head import RPNHead from .rtmdet_head import RTMDetHead, RTMDetSepBNHead from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead from .sabl_retina_head import SABLRetinaHead from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead from .solov2_head import SOLOV2Head from .ssd_head import SSDHead from .tood_head import TOODHead from .vfnet_head import VFNetHead from .yolact_head import YOLACTHead, YOLACTProtonet from .yolo_head import YOLOV3Head from .yolof_head import YOLOFHead from .yolox_head import YOLOXHead from .gfl_head_increment_erd import GFLHeadIncrementERD __all__ = [ 'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', 'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead', 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead', 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead', 'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead', 'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead', 'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead', 'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead', 'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead', 'DABDETRHead', 'GFLHeadIncrementERD' ]
3,283
45.253521
79
py
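# Minimal usage sketch (an assumption, not shown in the sources above): the
# __init__ listed here re-exports every dense head from one namespace,
# including the repo-specific GFLHeadIncrementERD, so they can be imported
# directly; BaseDenseHead itself is not re-exported and comes from its module.
from mmdet.models.dense_heads import GFLHeadIncrementERD, RetinaHead
from mmdet.models.dense_heads.base_dense_head import BaseDenseHead

print(issubclass(RetinaHead, BaseDenseHead))  # True: RetinaHead -> AnchorHead -> BaseDenseHead
print(GFLHeadIncrementERD.__name__)           # GFLHeadIncrementERD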
ERD
ERD-main/mmdet/models/dense_heads/base_dense_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from abc import ABCMeta, abstractmethod from inspect import signature from typing import List, Optional, Tuple import torch from mmcv.ops import batched_nms from mmengine.config import ConfigDict from mmengine.model import BaseModule, constant_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.structures import SampleList from mmdet.structures.bbox import (cat_boxes, get_box_tensor, get_box_wh, scale_boxes) from mmdet.utils import InstanceList, OptMultiConfig from ..test_time_augs import merge_aug_results from ..utils import (filter_scores_and_topk, select_single_mlvl, unpack_gt_instances) class BaseDenseHead(BaseModule, metaclass=ABCMeta): """Base class for DenseHeads. 1. The ``init_weights`` method is used to initialize densehead's model parameters. After detector initialization, ``init_weights`` is triggered when ``detector.init_weights()`` is called externally. 2. The ``loss`` method is used to calculate the loss of densehead, which includes two steps: (1) the densehead model performs forward propagation to obtain the feature maps (2) The ``loss_by_feat`` method is called based on the feature maps to calculate the loss. .. code:: text loss(): forward() -> loss_by_feat() 3. The ``predict`` method is used to predict detection results, which includes two steps: (1) the densehead model performs forward propagation to obtain the feature maps (2) The ``predict_by_feat`` method is called based on the feature maps to predict detection results including post-processing. .. code:: text predict(): forward() -> predict_by_feat() 4. The ``loss_and_predict`` method is used to return loss and detection results at the same time. It will call densehead's ``forward``, ``loss_by_feat`` and ``predict_by_feat`` methods in order. If one-stage is used as RPN, the densehead needs to return both losses and predictions. This predictions is used as the proposal of roihead. .. code:: text loss_and_predict(): forward() -> loss_by_feat() -> predict_by_feat() """ def __init__(self, init_cfg: OptMultiConfig = None) -> None: super().__init__(init_cfg=init_cfg) # `_raw_positive_infos` will be used in `get_positive_infos`, which # can get positive information. self._raw_positive_infos = dict() def init_weights(self) -> None: """Initialize the weights.""" super().init_weights() # avoid init_cfg overwrite the initialization of `conv_offset` for m in self.modules(): # DeformConv2dPack, ModulatedDeformConv2dPack if hasattr(m, 'conv_offset'): constant_init(m.conv_offset, 0) def get_positive_infos(self) -> InstanceList: """Get positive information from sampling results. Returns: list[:obj:`InstanceData`]: Positive information of each image, usually including positive bboxes, positive labels, positive priors, etc. 
""" if len(self._raw_positive_infos) == 0: return None sampling_results = self._raw_positive_infos.get( 'sampling_results', None) assert sampling_results is not None positive_infos = [] for sampling_result in enumerate(sampling_results): pos_info = InstanceData() pos_info.bboxes = sampling_result.pos_gt_bboxes pos_info.labels = sampling_result.pos_gt_labels pos_info.priors = sampling_result.pos_priors pos_info.pos_assigned_gt_inds = \ sampling_result.pos_assigned_gt_inds pos_info.pos_inds = sampling_result.pos_inds positive_infos.append(pos_info) return positive_infos def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict: """Perform forward propagation and loss calculation of the detection head on the features of the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: dict: A dictionary of loss components. """ outs = self(x) outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs loss_inputs = outs + (batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) return losses @abstractmethod def loss_by_feat(self, **kwargs) -> dict: """Calculate the loss based on the features extracted by the detection head.""" pass def loss_and_predict( self, x: Tuple[Tensor], batch_data_samples: SampleList, proposal_cfg: Optional[ConfigDict] = None ) -> Tuple[dict, InstanceList]: """Perform forward propagation of the head, then calculate loss and predictions from the features and data samples. Args: x (tuple[Tensor]): Features from FPN. batch_data_samples (list[:obj:`DetDataSample`]): Each item contains the meta information of each image and corresponding annotations. proposal_cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. Returns: tuple: the return value is a tuple contains: - losses: (dict[str, Tensor]): A dictionary of loss components. - predictions (list[:obj:`InstanceData`]): Detection results of each image after the post process. """ outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs outs = self(x) loss_inputs = outs + (batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, cfg=proposal_cfg) return losses, predictions def predict(self, x: Tuple[Tensor], batch_data_samples: SampleList, rescale: bool = False) -> InstanceList: """Perform forward propagation of the detection head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. 
""" batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] outs = self(x) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return predictions def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(cls_scores) == len(bbox_preds) if score_factors is None: # e.g. Retina, FreeAnchor, Foveabox, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, AutoAssign, etc. with_score_factors = True assert len(cls_scores) == len(score_factors) num_levels = len(cls_scores) featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=cls_scores[0].dtype, device=cls_scores[0].device) result_list = [] for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] cls_score_list = select_single_mlvl( cls_scores, img_id, detach=True) bbox_pred_list = select_single_mlvl( bbox_preds, img_id, detach=True) if with_score_factors: score_factor_list = select_single_mlvl( score_factors, img_id, detach=True) else: score_factor_list = [None for _ in range(num_levels)] results = self._predict_by_feat_single( cls_score_list=cls_score_list, bbox_pred_list=bbox_pred_list, score_factor_list=score_factor_list, mlvl_priors=mlvl_priors, img_meta=img_meta, cfg=cfg, rescale=rescale, with_nms=with_nms) result_list.append(results) return result_list def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). 
bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). img_meta (dict): Image meta info. cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ if score_factor_list[0] is None: # e.g. Retina, FreeAnchor, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, etc. with_score_factors = True cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bbox_preds = [] mlvl_valid_priors = [] mlvl_scores = [] mlvl_labels = [] if with_score_factors: mlvl_score_factors = [] else: mlvl_score_factors = None for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, score_factor_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] dim = self.bbox_coder.encode_size bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim) if with_score_factors: score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class scores = cls_score.softmax(-1)[:, :-1] # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. 
score_thr = cfg.get('score_thr', 0) results = filter_scores_and_topk( scores, score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, keep_idxs, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] if with_score_factors: score_factor = score_factor[keep_idxs] mlvl_bbox_preds.append(bbox_pred) mlvl_valid_priors.append(priors) mlvl_scores.append(scores) mlvl_labels.append(labels) if with_score_factors: mlvl_score_factors.append(score_factor) bbox_pred = torch.cat(mlvl_bbox_preds) priors = cat_boxes(mlvl_valid_priors) bboxes = self.bbox_coder.decode(priors, bbox_pred, max_shape=img_shape) results = InstanceData() results.bboxes = bboxes results.scores = torch.cat(mlvl_scores) results.labels = torch.cat(mlvl_labels) if with_score_factors: results.score_factors = torch.cat(mlvl_score_factors) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) def _bbox_post_process(self, results: InstanceData, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True, img_meta: Optional[dict] = None) -> InstanceData: """bbox post-processing method. The boxes would be rescaled to the original image scale and do the nms operation. Usually `with_nms` is False is used for aug test. Args: results (:obj:`InstaceData`): Detection instance results, each item has shape (num_bboxes, ). cfg (ConfigDict): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default to False. with_nms (bool): If True, do nms before return boxes. Default to True. img_meta (dict, optional): Image meta info. Defaults to None. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ if rescale: assert img_meta.get('scale_factor') is not None scale_factor = [1 / s for s in img_meta['scale_factor']] results.bboxes = scale_boxes(results.bboxes, scale_factor) if hasattr(results, 'score_factors'): # TODO: Add sqrt operation in order to be consistent with # the paper. score_factors = results.pop('score_factors') results.scores = results.scores * score_factors # filter small size bboxes if cfg.get('min_bbox_size', -1) >= 0: w, h = get_box_wh(results.bboxes) valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) if not valid_mask.all(): results = results[valid_mask] # TODO: deal with `with_nms` and `nms_cfg=None` in test_cfg if with_nms and results.bboxes.numel() > 0: bboxes = get_box_tensor(results.bboxes) det_bboxes, keep_idxs = batched_nms(bboxes, results.scores, results.labels, cfg.nms) results = results[keep_idxs] # some nms would reweight the score, such as softnms results.scores = det_bboxes[:, -1] results = results[:cfg.max_per_img] return results def aug_test(self, aug_batch_feats, aug_batch_img_metas, rescale=False, with_ori_nms=False, **kwargs): """Test function with test time augmentation. 
Args: aug_batch_feats (list[tuple[Tensor]]): The outer list indicates test-time augmentations and inner tuple indicate the multi-level feats from FPN, each Tensor should have a shape (B, C, H, W), aug_batch_img_metas (list[list[dict]]): Meta information of images under the different test-time augs (multiscale, flip, etc.). The outer list indicate the rescale (bool, optional): Whether to rescale the results. Defaults to False. with_ori_nms (bool): Whether execute the nms in original head. Defaults to False. It will be `True` when the head is adopted as `rpn_head`. Returns: list(obj:`InstanceData`): Detection results of the input images. Each item usually contains\ following keys. - scores (Tensor): Classification scores, has a shape (num_instance,) - labels (Tensor): Labels of bboxes, has a shape (num_instances,). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ # TODO: remove this for detr and deformdetr sig_of_get_results = signature(self.get_results) get_results_args = [ p.name for p in sig_of_get_results.parameters.values() ] get_results_single_sig = signature(self._get_results_single) get_results_single_sig_args = [ p.name for p in get_results_single_sig.parameters.values() ] assert ('with_nms' in get_results_args) and \ ('with_nms' in get_results_single_sig_args), \ f'{self.__class__.__name__}' \ 'does not support test-time augmentation ' num_imgs = len(aug_batch_img_metas[0]) aug_batch_results = [] for x, img_metas in zip(aug_batch_feats, aug_batch_img_metas): outs = self.forward(x) batch_instance_results = self.get_results( *outs, img_metas=img_metas, cfg=self.test_cfg, rescale=False, with_nms=with_ori_nms, **kwargs) aug_batch_results.append(batch_instance_results) # after merging, bboxes will be rescaled to the original image batch_results = merge_aug_results(aug_batch_results, aug_batch_img_metas) final_results = [] for img_id in range(num_imgs): results = batch_results[img_id] det_bboxes, keep_idxs = batched_nms(results.bboxes, results.scores, results.labels, self.test_cfg.nms) results = results[keep_idxs] # some nms operation may reweight the score such as softnms results.scores = det_bboxes[:, -1] results = results[:self.test_cfg.max_per_img] if rescale: # all results have been mapped to the original scale # in `merge_aug_results`, so just pass pass else: # map to the first aug image scale scale_factor = results.bboxes.new_tensor( aug_batch_img_metas[0][img_id]['scale_factor']) results.bboxes = \ results.bboxes * scale_factor final_results.append(results) return final_results
24,198
40.866782
79
py
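# Standalone sketch of the template-method flow described in the BaseDenseHead
# docstring above: loss() and predict() both call forward() first and then hand
# the raw feature outputs to loss_by_feat() / predict_by_feat(). The toy class
# and numbers below are hypothetical and do not use mmdet.
class ToyDenseHead:

    def forward(self, feats):
        return tuple(f * 2 for f in feats)        # stand-in for head outputs

    def loss(self, feats, targets):
        outs = self.forward(feats)                # step 1: forward
        return self.loss_by_feat(outs, targets)   # step 2: loss from features

    def loss_by_feat(self, outs, targets):
        return {'loss': sum(abs(o - t) for o, t in zip(outs, targets))}

    def predict(self, feats):
        outs = self.forward(feats)                # step 1: forward
        return self.predict_by_feat(outs)         # step 2: post-processing

    def predict_by_feat(self, outs):
        return [o for o in outs if o > 0]


toy_head = ToyDenseHead()
print(toy_head.loss((1.0, 2.0), (1.5, 3.0)))  # {'loss': 1.5}
print(toy_head.predict((1.0, -2.0)))          # [2.0]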
ERD
ERD-main/mmdet/models/dense_heads/ddod_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Sequence, Tuple import torch import torch.nn as nn from mmcv.cnn import ConvModule, Scale from mmengine.model import bias_init_with_prob, normal_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, reduce_mean) from ..task_modules.prior_generators import anchor_inside_flags from ..utils import images_to_levels, multi_apply, unmap from .anchor_head import AnchorHead EPS = 1e-12 @MODELS.register_module() class DDODHead(AnchorHead): """Detection Head of `DDOD <https://arxiv.org/abs/2107.02963>`_. DDOD head decomposes conjunctions lying in most current one-stage detectors via label assignment disentanglement, spatial feature disentanglement, and pyramid supervision disentanglement. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): The number of stacked Conv. Defaults to 4. conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for convolution layer. Defaults to None. use_dcn (bool): Use dcn, Same as ATSS when False. Defaults to True. norm_cfg (:obj:`ConfigDict` or dict): Normal config of ddod head. Defaults to dict(type='GN', num_groups=32, requires_grad=True). loss_iou (:obj:`ConfigDict` or dict): Config of IoU loss. Defaults to dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0). """ def __init__(self, num_classes: int, in_channels: int, stacked_convs: int = 4, conv_cfg: OptConfigType = None, use_dcn: bool = True, norm_cfg: ConfigType = dict( type='GN', num_groups=32, requires_grad=True), loss_iou: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), **kwargs) -> None: self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.use_dcn = use_dcn super().__init__(num_classes, in_channels, **kwargs) if self.train_cfg: self.cls_assigner = TASK_UTILS.build(self.train_cfg['assigner']) self.reg_assigner = TASK_UTILS.build( self.train_cfg['reg_assigner']) self.loss_iou = MODELS.build(loss_iou) def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=dict(type='DCN', deform_groups=1) if i == 0 and self.use_dcn else self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=dict(type='DCN', deform_groups=1) if i == 0 and self.use_dcn else self.conv_cfg, norm_cfg=self.norm_cfg)) self.atss_cls = nn.Conv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 3, padding=1) self.atss_reg = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, 3, padding=1) self.atss_iou = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.prior_generator.strides]) # we use the global list in loss self.cls_num_pos_samples_per_level = [ 0. for _ in range(len(self.prior_generator.strides)) ] self.reg_num_pos_samples_per_level = [ 0. 
for _ in range(len(self.prior_generator.strides)) ] def init_weights(self) -> None: """Initialize weights of the head.""" for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) normal_init(self.atss_reg, std=0.01) normal_init(self.atss_iou, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.atss_cls, std=0.01, bias=bias_cls) def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores, bbox predictions, and iou predictions. - cls_scores (list[Tensor]): Classification scores for all \ scale levels, each is a 4D-tensor, the channels number is \ num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all \ scale levels, each is a 4D-tensor, the channels number is \ num_base_priors * 4. - iou_preds (list[Tensor]): IoU scores for all scale levels, \ each is a 4D-tensor, the channels number is num_base_priors * 1. """ return multi_apply(self.forward_single, x, self.scales) def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: - cls_score (Tensor): Cls scores for a single scale level \ the channels number is num_base_priors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single \ scale level, the channels number is num_base_priors * 4. - iou_pred (Tensor): Iou for a single scale level, the \ channel number is (N, num_base_priors * 1, H, W). """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.atss_cls(cls_feat) # we just follow atss, not apply exp in bbox_pred bbox_pred = scale(self.atss_reg(reg_feat)).float() iou_pred = self.atss_iou(reg_feat) return cls_score, bbox_pred, iou_pred def loss_cls_by_feat_single(self, cls_score: Tensor, labels: Tensor, label_weights: Tensor, reweight_factor: List[float], avg_factor: float) -> Tuple[Tensor]: """Compute cls loss of a single scale level. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_base_priors * num_classes, H, W). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) reweight_factor (List[float]): Reweight factor for cls and reg loss. avg_factor (float): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: Tuple[Tensor]: A tuple of loss components. 
""" cls_score = cls_score.permute(0, 2, 3, 1).reshape( -1, self.cls_out_channels).contiguous() labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor) return reweight_factor * loss_cls, def loss_reg_by_feat_single(self, anchors: Tensor, bbox_pred: Tensor, iou_pred: Tensor, labels, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, reweight_factor: List[float], avg_factor: float) -> Tuple[Tensor, Tensor]: """Compute reg loss of a single scale level based on the features extracted by the detection head. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_base_priors * 4, H, W). iou_pred (Tensor): Iou for a single scale level, the channel number is (N, num_base_priors * 1, H, W). labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4) reweight_factor (List[float]): Reweight factor for cls and reg loss. avg_factor (float): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: Tuple[Tensor, Tensor]: A tuple of loss components. """ anchors = anchors.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) iou_pred = iou_pred.permute(0, 2, 3, 1).reshape(-1, ) bbox_targets = bbox_targets.reshape(-1, 4) bbox_weights = bbox_weights.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) iou_targets = label_weights.new_zeros(labels.shape) iou_weights = label_weights.new_zeros(labels.shape) iou_weights[(bbox_weights.sum(axis=1) > 0).nonzero( as_tuple=False)] = 1. 
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero(as_tuple=False).squeeze(1) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchors, pos_bbox_pred) pos_decode_bbox_targets = self.bbox_coder.decode( pos_anchors, pos_bbox_targets) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, avg_factor=avg_factor) iou_targets[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) loss_iou = self.loss_iou( iou_pred, iou_targets, iou_weights, avg_factor=avg_factor) else: loss_bbox = bbox_pred.sum() * 0 loss_iou = iou_pred.sum() * 0 return reweight_factor * loss_bbox, reweight_factor * loss_iou def calc_reweight_factor(self, labels_list: List[Tensor]) -> List[float]: """Compute reweight_factor for regression and classification loss.""" # get pos samples for each level bg_class_ind = self.num_classes for ii, each_level_label in enumerate(labels_list): pos_inds = ((each_level_label >= 0) & (each_level_label < bg_class_ind)).nonzero( as_tuple=False).squeeze(1) self.cls_num_pos_samples_per_level[ii] += len(pos_inds) # get reweight factor from 1 ~ 2 with bilinear interpolation min_pos_samples = min(self.cls_num_pos_samples_per_level) max_pos_samples = max(self.cls_num_pos_samples_per_level) interval = 1. / (max_pos_samples - min_pos_samples + 1e-10) reweight_factor_per_level = [] for pos_samples in self.cls_num_pos_samples_per_level: factor = 2. - (pos_samples - min_pos_samples) * interval reweight_factor_per_level.append(factor) return reweight_factor_per_level def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], iou_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_base_priors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_base_priors * 4, H, W) iou_preds (list[Tensor]): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) # calculate common vars for cls and reg assigners at once targets_com = self.process_predictions_and_anchors( anchor_list, valid_flag_list, cls_scores, bbox_preds, batch_img_metas, batch_gt_instances_ignore) (anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, batch_gt_instances_ignore) = targets_com # classification branch assigner cls_targets = self.get_cls_targets( anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (cls_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() avg_factor = max(avg_factor, 1.0) reweight_factor_per_level = self.calc_reweight_factor(labels_list) cls_losses_cls, = multi_apply( self.loss_cls_by_feat_single, cls_scores, labels_list, label_weights_list, reweight_factor_per_level, avg_factor=avg_factor) # regression branch assigner reg_targets = self.get_reg_targets( anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, bbox_pred_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (reg_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = reg_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() avg_factor = max(avg_factor, 1.0) reweight_factor_per_level = self.calc_reweight_factor(labels_list) reg_losses_bbox, reg_losses_iou = multi_apply( self.loss_reg_by_feat_single, reg_anchor_list, bbox_preds, iou_preds, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, reweight_factor_per_level, avg_factor=avg_factor) return dict( loss_cls=cls_losses_cls, loss_bbox=reg_losses_bbox, loss_iou=reg_losses_iou) def process_predictions_and_anchors( self, anchor_list: List[List[Tensor]], valid_flag_list: List[List[Tensor]], cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> tuple: """Compute common vars for regression and classification targets. Args: anchor_list (List[List[Tensor]]): anchors of each image. valid_flag_list (List[List[Tensor]]): Valid flags of each image. cls_scores (List[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Return: tuple[Tensor]: A tuple of common loss vars. 
""" num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs anchor_list_ = [] valid_flag_list_ = [] # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list_.append(torch.cat(anchor_list[i])) valid_flag_list_.append(torch.cat(valid_flag_list[i])) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None for _ in range(num_imgs)] num_levels = len(cls_scores) cls_score_list = [] bbox_pred_list = [] mlvl_cls_score_list = [ cls_score.permute(0, 2, 3, 1).reshape( num_imgs, -1, self.num_base_priors * self.cls_out_channels) for cls_score in cls_scores ] mlvl_bbox_pred_list = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_base_priors * 4) for bbox_pred in bbox_preds ] for i in range(num_imgs): mlvl_cls_tensor_list = [ mlvl_cls_score_list[j][i] for j in range(num_levels) ] mlvl_bbox_tensor_list = [ mlvl_bbox_pred_list[j][i] for j in range(num_levels) ] cat_mlvl_cls_score = torch.cat(mlvl_cls_tensor_list, dim=0) cat_mlvl_bbox_pred = torch.cat(mlvl_bbox_tensor_list, dim=0) cls_score_list.append(cat_mlvl_cls_score) bbox_pred_list.append(cat_mlvl_bbox_pred) return (anchor_list_, valid_flag_list_, num_level_anchors_list, cls_score_list, bbox_pred_list, batch_gt_instances_ignore) def get_cls_targets(self, anchor_list: List[Tensor], valid_flag_list: List[Tensor], num_level_anchors_list: List[int], cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Get cls targets for DDOD head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. Args: anchor_list (list[Tensor]): anchors of each image. valid_flag_list (list[Tensor]): Valid flags of each image. num_level_anchors_list (list[Tensor]): Number of anchors of each scale level of all image. cls_score_list (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. bbox_pred_list (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Return: tuple[Tensor]: A tuple of cls targets components. 
""" (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._get_targets_single, anchor_list, valid_flag_list, cls_score_list, bbox_pred_list, num_level_anchors_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs, is_cls_assigner=True) # Get `avg_factor` of all images, which calculate in `SamplingResult`. # When using sampling method, avg_factor is usually the sum of # positive and negative priors. When using `PseudoSampler`, # `avg_factor` is usually equal to the number of positive priors. avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0]) labels_list = images_to_levels(all_labels, num_level_anchors_list[0]) label_weights_list = images_to_levels(all_label_weights, num_level_anchors_list[0]) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors_list[0]) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors_list[0]) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) def get_reg_targets(self, anchor_list: List[Tensor], valid_flag_list: List[Tensor], num_level_anchors_list: List[int], cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Get reg targets for DDOD head. This method is almost the same as `AnchorHead.get_targets()` when is_cls_assigner is False. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. Args: anchor_list (list[Tensor]): anchors of each image. valid_flag_list (list[Tensor]): Valid flags of each image. num_level_anchors_list (list[Tensor]): Number of anchors of each scale level of all image. cls_score_list (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. bbox_pred_list (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Return: tuple[Tensor]: A tuple of reg targets components. """ (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._get_targets_single, anchor_list, valid_flag_list, cls_score_list, bbox_pred_list, num_level_anchors_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs, is_cls_assigner=False) # Get `avg_factor` of all images, which calculate in `SamplingResult`. # When using sampling method, avg_factor is usually the sum of # positive and negative priors. When using `PseudoSampler`, # `avg_factor` is usually equal to the number of positive priors. 
avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. multiple levels anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0]) labels_list = images_to_levels(all_labels, num_level_anchors_list[0]) label_weights_list = images_to_levels(all_label_weights, num_level_anchors_list[0]) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors_list[0]) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors_list[0]) return (anchors_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) def _get_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, cls_scores: Tensor, bbox_preds: Tensor, num_level_anchors: List[int], gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True, is_cls_assigner: bool = True) -> tuple: """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_base_priors, 4). valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_base_priors,). cls_scores (Tensor): Classification scores for all scale levels of the image. bbox_preds (Tensor): Box energies / deltas for all scale levels of the image. num_level_anchors (List[int]): Number of anchors of each scale level. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. is_cls_assigner (bool): Classification or regression. Defaults to True. Returns: tuple: N is the number of total anchors in the image. - anchors (Tensor): all anchors in the image with shape (N, 4). - labels (Tensor): Labels of all anchors in the image with \ shape (N, ). - label_weights (Tensor): Label weights of all anchor in the \ image with shape (N, ). - bbox_targets (Tensor): BBox targets of all anchors in the \ image with shape (N, 4). - bbox_weights (Tensor): BBox weights of all anchors in the \ image with shape (N, 4) - pos_inds (Tensor): Indices of positive anchor with shape \ (num_pos, ). - neg_inds (Tensor): Indices of negative anchor with shape \ (num_neg, ). - sampling_result (:obj:`SamplingResult`): Sampling results. """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. 
Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) bbox_preds_valid = bbox_preds[inside_flags, :] cls_scores_valid = cls_scores[inside_flags, :] assigner = self.cls_assigner if is_cls_assigner else self.reg_assigner # decode prediction out of assigner bbox_preds_valid = self.bbox_coder.decode(anchors, bbox_preds_valid) pred_instances = InstanceData( priors=anchors, bboxes=bbox_preds_valid, scores=cls_scores_valid) assign_result = assigner.assign( pred_instances=pred_instances, num_level_priors=num_level_anchors_inside, gt_instances=gt_instances, gt_instances_ignore=gt_instances_ignore) sampling_result = self.sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def get_num_level_anchors_inside(self, num_level_anchors: List[int], inside_flags: Tensor) -> List[int]: """Get the anchors of each scale level inside. Args: num_level_anchors (list[int]): Number of anchors of each scale level. inside_flags (Tensor): Multi level inside flags of the image, which are concatenated into a single tensor of shape (num_base_priors,). Returns: list[int]: Number of anchors of each scale level inside. """ split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside
36,278
44.633962
79
py
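# Standalone sketch (invented sample counts) of the formula used by
# DDODHead.calc_reweight_factor above: the running number of positive samples
# per FPN level is mapped linearly to a factor in [1, 2], so sparsely assigned
# levels get up-weighted and densely assigned levels get down-weighted.
def calc_reweight_factor(pos_samples_per_level):
    min_pos = min(pos_samples_per_level)
    max_pos = max(pos_samples_per_level)
    interval = 1.0 / (max_pos - min_pos + 1e-10)
    return [2.0 - (p - min_pos) * interval for p in pos_samples_per_level]


print(calc_reweight_factor([10, 40, 70]))  # ~[2.0, 1.5, 1.0]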
ERD
ERD-main/mmdet/models/dense_heads/free_anchor_retina_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List import torch import torch.nn.functional as F from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_overlaps from mmdet.utils import InstanceList, OptConfigType, OptInstanceList from ..utils import multi_apply from .retina_head import RetinaHead EPS = 1e-12 @MODELS.register_module() class FreeAnchorRetinaHead(RetinaHead): """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Defaults to 4. conv_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct and config conv layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct and config norm layer. Defaults to norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). pre_anchor_topk (int): Number of boxes that be token in each bag. Defaults to 50 bbox_thr (float): The threshold of the saturated linear function. It is usually the same with the IoU threshold used in NMS. Defaults to 0.6. gamma (float): Gamma parameter in focal loss. Defaults to 2.0. alpha (float): Alpha parameter in focal loss. Defaults to 0.5. """ def __init__(self, num_classes: int, in_channels: int, stacked_convs: int = 4, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, pre_anchor_topk: int = 50, bbox_thr: float = 0.6, gamma: float = 2.0, alpha: float = 0.5, **kwargs) -> None: super().__init__( num_classes=num_classes, in_channels=in_channels, stacked_convs=stacked_convs, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs) self.pre_anchor_topk = pre_anchor_topk self.bbox_thr = bbox_thr self.gamma = gamma self.alpha = alpha def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, _ = self.get_anchors( featmap_sizes=featmap_sizes, batch_img_metas=batch_img_metas, device=device) concat_anchor_list = [torch.cat(anchor) for anchor in anchor_list] # concatenate each level cls_scores = [ cls.permute(0, 2, 3, 1).reshape(cls.size(0), -1, self.cls_out_channels) for cls in cls_scores ] bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4) for bbox_pred in bbox_preds ] cls_scores = torch.cat(cls_scores, dim=1) cls_probs = torch.sigmoid(cls_scores) bbox_preds = torch.cat(bbox_preds, dim=1) box_probs, positive_losses, num_pos_list = multi_apply( self.positive_loss_single, cls_probs, bbox_preds, concat_anchor_list, batch_gt_instances) num_pos = sum(num_pos_list) positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) # box_prob: P{a_{j} \in A_{+}} box_probs = torch.stack(box_probs, dim=0) # negative_loss: # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| negative_loss = self.negative_bag_loss(cls_probs, box_probs).sum() / \ max(1, num_pos * self.pre_anchor_topk) # avoid the absence of gradients in regression subnet # when no ground-truth in a batch if num_pos == 0: positive_loss = bbox_preds.sum() * 0 losses = { 'positive_bag_loss': positive_loss, 'negative_bag_loss': negative_loss } return losses def positive_loss_single(self, cls_prob: Tensor, bbox_pred: Tensor, flat_anchors: Tensor, gt_instances: InstanceData) -> tuple: """Compute positive loss. Args: cls_prob (Tensor): Classification probability of shape (num_anchors, num_classes). bbox_pred (Tensor): Box probability of shape (num_anchors, 4). flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors, 4) gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. Returns: tuple: - box_prob (Tensor): Box probability of shape (num_anchors, 4). - positive_loss (Tensor): Positive loss of shape (num_pos, ). - num_pos (int): positive samples indexes. 
""" gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels with torch.no_grad(): if len(gt_bboxes) == 0: image_box_prob = torch.zeros( flat_anchors.size(0), self.cls_out_channels).type_as(bbox_pred) else: # box_localization: a_{j}^{loc}, shape: [j, 4] pred_boxes = self.bbox_coder.decode(flat_anchors, bbox_pred) # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] object_box_iou = bbox_overlaps(gt_bboxes, pred_boxes) # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] t1 = self.bbox_thr t2 = object_box_iou.max( dim=1, keepdim=True).values.clamp(min=t1 + 1e-12) object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp( min=0, max=1) # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] num_obj = gt_labels.size(0) indices = torch.stack( [torch.arange(num_obj).type_as(gt_labels), gt_labels], dim=0) object_cls_box_prob = torch.sparse_coo_tensor( indices, object_box_prob) # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] """ from "start" to "end" implement: image_box_iou = torch.sparse.max(object_cls_box_prob, dim=0).t() """ # start box_cls_prob = torch.sparse.sum( object_cls_box_prob, dim=0).to_dense() indices = torch.nonzero(box_cls_prob, as_tuple=False).t_() if indices.numel() == 0: image_box_prob = torch.zeros( flat_anchors.size(0), self.cls_out_channels).type_as(object_box_prob) else: nonzero_box_prob = torch.where( (gt_labels.unsqueeze(dim=-1) == indices[0]), object_box_prob[:, indices[1]], torch.tensor( [0]).type_as(object_box_prob)).max(dim=0).values # upmap to shape [j, c] image_box_prob = torch.sparse_coo_tensor( indices.flip([0]), nonzero_box_prob, size=(flat_anchors.size(0), self.cls_out_channels)).to_dense() # end box_prob = image_box_prob # construct bags for objects match_quality_matrix = bbox_overlaps(gt_bboxes, flat_anchors) _, matched = torch.topk( match_quality_matrix, self.pre_anchor_topk, dim=1, sorted=False) del match_quality_matrix # matched_cls_prob: P_{ij}^{cls} matched_cls_prob = torch.gather( cls_prob[matched], 2, gt_labels.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, 1)).squeeze(2) # matched_box_prob: P_{ij}^{loc} matched_anchors = flat_anchors[matched] matched_object_targets = self.bbox_coder.encode( matched_anchors, gt_bboxes.unsqueeze(dim=1).expand_as(matched_anchors)) loss_bbox = self.loss_bbox( bbox_pred[matched], matched_object_targets, reduction_override='none').sum(-1) matched_box_prob = torch.exp(-loss_bbox) # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )} num_pos = len(gt_bboxes) positive_loss = self.positive_bag_loss(matched_cls_prob, matched_box_prob) return box_prob, positive_loss, num_pos def positive_bag_loss(self, matched_cls_prob: Tensor, matched_box_prob: Tensor) -> Tensor: """Compute positive bag loss. :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`. :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples. :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples. Args: matched_cls_prob (Tensor): Classification probability of matched samples in shape (num_gt, pre_anchor_topk). matched_box_prob (Tensor): BBox probability of matched samples, in shape (num_gt, pre_anchor_topk). Returns: Tensor: Positive bag loss in shape (num_gt,). 
""" # noqa: E501, W605 # bag_prob = Mean-max(matched_prob) matched_prob = matched_cls_prob * matched_box_prob weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None) weight /= weight.sum(dim=1).unsqueeze(dim=-1) bag_prob = (weight * matched_prob).sum(dim=1) # positive_bag_loss = -self.alpha * log(bag_prob) return self.alpha * F.binary_cross_entropy( bag_prob, torch.ones_like(bag_prob), reduction='none') def negative_bag_loss(self, cls_prob: Tensor, box_prob: Tensor) -> Tensor: """Compute negative bag loss. :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`. :math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples. :math:`P_{j}^{bg}`: Classification probability of negative samples. Args: cls_prob (Tensor): Classification probability, in shape (num_img, num_anchors, num_classes). box_prob (Tensor): Box probability, in shape (num_img, num_anchors, num_classes). Returns: Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes). """ # noqa: E501, W605 prob = cls_prob * (1 - box_prob) # There are some cases when neg_prob = 0. # This will cause the neg_prob.log() to be inf without clamp. prob = prob.clamp(min=EPS, max=1 - EPS) negative_bag_loss = prob**self.gamma * F.binary_cross_entropy( prob, torch.zeros_like(prob), reduction='none') return (1 - self.alpha) * negative_bag_loss
12,824
39.974441
94
py
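A minimal standalone sketch of the Mean-max positive bag loss implemented in FreeAnchorRetinaHead.positive_bag_loss above; the bag size, number of ground-truth boxes and alpha value below are made-up illustrations, not values taken from a real config.

import torch
import torch.nn.functional as F

# Illustrative sizes; real values come from pre_anchor_topk and the image's GT count.
alpha = 0.5
num_gt, topk = 3, 50
matched_cls_prob = torch.rand(num_gt, topk)  # P_{ij}^{cls}
matched_box_prob = torch.rand(num_gt, topk)  # P_{ij}^{loc} = exp(-loss_bbox)

matched_prob = matched_cls_prob * matched_box_prob
# Mean-max weighting: anchors whose probability is close to 1 dominate the mean.
weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)
weight = weight / weight.sum(dim=1, keepdim=True)
bag_prob = (weight * matched_prob).sum(dim=1)
# -alpha * log(bag_prob), written as BCE against an all-ones target, as in the head.
positive_bag_loss = alpha * F.binary_cross_entropy(
    bag_prob, torch.ones_like(bag_prob), reduction='none')
print(positive_bag_loss.shape)  # torch.Size([3]): one loss term per ground-truth box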
ERD
ERD-main/mmdet/models/dense_heads/guided_anchor_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch import torch.nn as nn from mmcv.ops import DeformConv2d, MaskedConv2d from mmengine.model import BaseModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList) from ..layers import multiclass_nms from ..task_modules.prior_generators import anchor_inside_flags, calc_region from ..task_modules.samplers import PseudoSampler from ..utils import images_to_levels, multi_apply, unmap from .anchor_head import AnchorHead class FeatureAdaption(BaseModule): """Feature Adaption Module. Feature Adaption Module is implemented based on DCN v1. It uses anchor shape prediction rather than feature map to predict offsets of deform conv layer. Args: in_channels (int): Number of channels in the input feature map. out_channels (int): Number of channels in the output feature map. kernel_size (int): Deformable conv kernel size. Defaults to 3. deform_groups (int): Deformable conv group size. Defaults to 4. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \ list[dict], optional): Initialization config dict. """ def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, deform_groups: int = 4, init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.1, override=dict(type='Normal', name='conv_adaption', std=0.01)) ) -> None: super().__init__(init_cfg=init_cfg) offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 2, deform_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv2d( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deform_groups=deform_groups) self.relu = nn.ReLU(inplace=True) def forward(self, x: Tensor, shape: Tensor) -> Tensor: offset = self.conv_offset(shape.detach()) x = self.relu(self.conv_adaption(x, offset)) return x @MODELS.register_module() class GuidedAnchorHead(AnchorHead): """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.). This GuidedAnchorHead will predict high-quality feature guided anchors and locations where anchors will be kept in inference. There are mainly 3 categories of bounding-boxes. - Sampled 9 pairs for target assignment. (approxes) - The square boxes where the predicted anchors are based on. (squares) - Guided anchors. Please refer to https://arxiv.org/abs/1901.03278 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Defaults to 256. approx_anchor_generator (:obj:`ConfigDict` or dict): Config dict for approx generator square_anchor_generator (:obj:`ConfigDict` or dict): Config dict for square generator anchor_coder (:obj:`ConfigDict` or dict): Config dict for anchor coder bbox_coder (:obj:`ConfigDict` or dict): Config dict for bbox coder reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Defaults to False. It should be `True` when using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. deform_groups: (int): Group number of DCN in FeatureAdaption module. Defaults to 4. loc_filter_thr (float): Threshold to filter out unconcerned regions. Defaults to 0.01. loss_loc (:obj:`ConfigDict` or dict): Config of location loss. 
loss_shape (:obj:`ConfigDict` or dict): Config of anchor shape loss. loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox (:obj:`ConfigDict` or dict): Config of bbox regression loss. init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \ list[dict], optional): Initialization config dict. """ def __init__( self, num_classes: int, in_channels: int, feat_channels: int = 256, approx_anchor_generator: ConfigType = dict( type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), square_anchor_generator: ConfigType = dict( type='AnchorGenerator', ratios=[1.0], scales=[8], strides=[4, 8, 16, 32, 64]), anchor_coder: ConfigType = dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), bbox_coder: ConfigType = dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), reg_decoded_bbox: bool = False, deform_groups: int = 4, loc_filter_thr: float = 0.01, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, loss_loc: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape: ConfigType = dict( type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox: ConfigType = dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.0), init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_loc', std=0.01, lbias_prob=0.01)) ) -> None: super(AnchorHead, self).__init__(init_cfg=init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.deform_groups = deform_groups self.loc_filter_thr = loc_filter_thr # build approx_anchor_generator and square_anchor_generator assert (approx_anchor_generator['octave_base_scale'] == square_anchor_generator['scales'][0]) assert (approx_anchor_generator['strides'] == square_anchor_generator['strides']) self.approx_anchor_generator = TASK_UTILS.build( approx_anchor_generator) self.square_anchor_generator = TASK_UTILS.build( square_anchor_generator) self.approxs_per_octave = self.approx_anchor_generator \ .num_base_priors[0] self.reg_decoded_bbox = reg_decoded_bbox # one anchor per location self.num_base_priors = self.square_anchor_generator.num_base_priors[0] self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.loc_focal_loss = loss_loc['type'] in ['FocalLoss'] if self.use_sigmoid_cls: self.cls_out_channels = self.num_classes else: self.cls_out_channels = self.num_classes + 1 # build bbox_coder self.anchor_coder = TASK_UTILS.build(anchor_coder) self.bbox_coder = TASK_UTILS.build(bbox_coder) # build losses self.loss_loc = MODELS.build(loss_loc) self.loss_shape = MODELS.build(loss_shape) self.loss_cls = MODELS.build(loss_cls) self.loss_bbox = MODELS.build(loss_bbox) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) # use PseudoSampler when no sampler in train_cfg if train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler() self.ga_assigner = TASK_UTILS.build(self.train_cfg['ga_assigner']) if train_cfg.get('ga_sampler', None) is not None: self.ga_sampler = TASK_UTILS.build( self.train_cfg['ga_sampler'], default_args=dict(context=self)) else: 
self.ga_sampler = PseudoSampler() self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.conv_loc = nn.Conv2d(self.in_channels, 1, 1) self.conv_shape = nn.Conv2d(self.in_channels, self.num_base_priors * 2, 1) self.feature_adaption = FeatureAdaption( self.in_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.conv_cls = MaskedConv2d( self.feat_channels, self.num_base_priors * self.cls_out_channels, 1) self.conv_reg = MaskedConv2d(self.feat_channels, self.num_base_priors * 4, 1) def forward_single(self, x: Tensor) -> Tuple[Tensor]: """Forward feature of a single scale level.""" loc_pred = self.conv_loc(x) shape_pred = self.conv_shape(x) x = self.feature_adaption(x, shape_pred) # masked conv is only used during inference for speed-up if not self.training: mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr else: mask = None cls_score = self.conv_cls(x, mask) bbox_pred = self.conv_reg(x, mask) return cls_score, bbox_pred, shape_pred, loc_pred def forward(self, x: List[Tensor]) -> Tuple[List[Tensor]]: """Forward features from the upstream network.""" return multi_apply(self.forward_single, x) def get_sampled_approxs(self, featmap_sizes: List[Tuple[int, int]], batch_img_metas: List[dict], device: str = 'cuda') -> tuple: """Get sampled approxs and inside flags according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. batch_img_metas (list[dict]): Image meta info. device (str): device for returned tensors Returns: tuple: approxes of each image, inside flags of each image """ num_imgs = len(batch_img_metas) # since feature map sizes of all images are the same, we only compute # approxes for one time multi_level_approxs = self.approx_anchor_generator.grid_priors( featmap_sizes, device=device) approxs_list = [multi_level_approxs for _ in range(num_imgs)] # for each image, we compute inside flags of multi level approxes inside_flag_list = [] for img_id, img_meta in enumerate(batch_img_metas): multi_level_flags = [] multi_level_approxs = approxs_list[img_id] # obtain valid flags for each approx first multi_level_approx_flags = self.approx_anchor_generator \ .valid_flags(featmap_sizes, img_meta['pad_shape'], device=device) for i, flags in enumerate(multi_level_approx_flags): approxs = multi_level_approxs[i] inside_flags_list = [] for j in range(self.approxs_per_octave): split_valid_flags = flags[j::self.approxs_per_octave] split_approxs = approxs[j::self.approxs_per_octave, :] inside_flags = anchor_inside_flags( split_approxs, split_valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) inside_flags_list.append(inside_flags) # inside_flag for a position is true if any anchor in this # position is true inside_flags = ( torch.stack(inside_flags_list, 0).sum(dim=0) > 0) multi_level_flags.append(inside_flags) inside_flag_list.append(multi_level_flags) return approxs_list, inside_flag_list def get_anchors(self, featmap_sizes: List[Tuple[int, int]], shape_preds: List[Tensor], loc_preds: List[Tensor], batch_img_metas: List[dict], use_loc_filter: bool = False, device: str = 'cuda') -> tuple: """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. shape_preds (list[tensor]): Multi-level shape predictions. loc_preds (list[tensor]): Multi-level location predictions. batch_img_metas (list[dict]): Image meta info. use_loc_filter (bool): Use loc filter or not. 
Defaults to False device (str): device for returned tensors. Defaults to `cuda`. Returns: tuple: square approxs of each image, guided anchors of each image, loc masks of each image. """ num_imgs = len(batch_img_metas) num_levels = len(featmap_sizes) # since feature map sizes of all images are the same, we only compute # squares for one time multi_level_squares = self.square_anchor_generator.grid_priors( featmap_sizes, device=device) squares_list = [multi_level_squares for _ in range(num_imgs)] # for each image, we compute multi level guided anchors guided_anchors_list = [] loc_mask_list = [] for img_id, img_meta in enumerate(batch_img_metas): multi_level_guided_anchors = [] multi_level_loc_mask = [] for i in range(num_levels): squares = squares_list[img_id][i] shape_pred = shape_preds[i][img_id] loc_pred = loc_preds[i][img_id] guided_anchors, loc_mask = self._get_guided_anchors_single( squares, shape_pred, loc_pred, use_loc_filter=use_loc_filter) multi_level_guided_anchors.append(guided_anchors) multi_level_loc_mask.append(loc_mask) guided_anchors_list.append(multi_level_guided_anchors) loc_mask_list.append(multi_level_loc_mask) return squares_list, guided_anchors_list, loc_mask_list def _get_guided_anchors_single( self, squares: Tensor, shape_pred: Tensor, loc_pred: Tensor, use_loc_filter: bool = False) -> Tuple[Tensor]: """Get guided anchors and loc masks for a single level. Args: squares (tensor): Squares of a single level. shape_pred (tensor): Shape predictions of a single level. loc_pred (tensor): Loc predictions of a single level. use_loc_filter (list[tensor]): Use loc filter or not. Defaults to False. Returns: tuple: guided anchors, location masks """ # calculate location filtering mask loc_pred = loc_pred.sigmoid().detach() if use_loc_filter: loc_mask = loc_pred >= self.loc_filter_thr else: loc_mask = loc_pred >= 0.0 mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_base_priors) mask = mask.contiguous().view(-1) # calculate guided anchors squares = squares[mask] anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view( -1, 2).detach()[mask] bbox_deltas = anchor_deltas.new_full(squares.size(), 0) bbox_deltas[:, 2:] = anchor_deltas guided_anchors = self.anchor_coder.decode( squares, bbox_deltas, wh_ratio_clip=1e-6) return guided_anchors, mask def ga_loc_targets(self, batch_gt_instances: InstanceList, featmap_sizes: List[Tuple[int, int]]) -> tuple: """Compute location targets for guided anchoring. Each feature map is divided into positive, negative and ignore regions. - positive regions: target 1, weight 1 - ignore regions: target 0, weight 0 - negative regions: target 0, weight 0.1 Args: batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. featmap_sizes (list[tuple]): Multi level sizes of each feature maps. Returns: tuple: Returns a tuple containing location targets. """ anchor_scale = self.approx_anchor_generator.octave_base_scale anchor_strides = self.approx_anchor_generator.strides # Currently only supports same stride in x and y direction. 
for stride in anchor_strides: assert (stride[0] == stride[1]) anchor_strides = [stride[0] for stride in anchor_strides] center_ratio = self.train_cfg['center_ratio'] ignore_ratio = self.train_cfg['ignore_ratio'] img_per_gpu = len(batch_gt_instances) num_lvls = len(featmap_sizes) r1 = (1 - center_ratio) / 2 r2 = (1 - ignore_ratio) / 2 all_loc_targets = [] all_loc_weights = [] all_ignore_map = [] for lvl_id in range(num_lvls): h, w = featmap_sizes[lvl_id] loc_targets = torch.zeros( img_per_gpu, 1, h, w, device=batch_gt_instances[0].bboxes.device, dtype=torch.float32) loc_weights = torch.full_like(loc_targets, -1) ignore_map = torch.zeros_like(loc_targets) all_loc_targets.append(loc_targets) all_loc_weights.append(loc_weights) all_ignore_map.append(ignore_map) for img_id in range(img_per_gpu): gt_bboxes = batch_gt_instances[img_id].bboxes scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) # assign gt bboxes to different feature levels w.r.t. their scales target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() for gt_id in range(gt_bboxes.size(0)): lvl = target_lvls[gt_id].item() # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] # calculate ignore regions ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[lvl]) # calculate positive (center) regions ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( gt_, r1, featmap_sizes[lvl]) all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 0 all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 # calculate ignore map on nearby low level feature if lvl > 0: d_lvl = lvl - 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[d_lvl]) all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 # calculate ignore map on nearby high level feature if lvl < num_lvls - 1: u_lvl = lvl + 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[u_lvl]) all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 for lvl_id in range(num_lvls): # ignore negative regions w.r.t. ignore map all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) & (all_ignore_map[lvl_id] > 0)] = 0 # set negative regions with weight 0.1 all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 # loc average factor to balance loss loc_avg_factor = sum( [t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200 return all_loc_targets, all_loc_weights, loc_avg_factor def _ga_shape_target_single(self, flat_approxs: Tensor, inside_flags: Tensor, flat_squares: Tensor, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData], img_meta: dict, unmap_outputs: bool = True) -> tuple: """Compute guided anchoring targets. This function returns sampled anchors and gt bboxes directly rather than calculates regression targets. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). 
flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. img_meta (dict): Meta info of a single image. unmap_outputs (bool): unmap outputs or not. Returns: tuple: Returns a tuple containing shape targets of each image. """ if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors num_square = flat_squares.size(0) approxs = flat_approxs.view(num_square, self.approxs_per_octave, 4) approxs = approxs[inside_flags, ...] squares = flat_squares[inside_flags, :] pred_instances = InstanceData() pred_instances.priors = squares pred_instances.approxs = approxs assign_result = self.ga_assigner.assign( pred_instances=pred_instances, gt_instances=gt_instances, gt_instances_ignore=gt_instances_ignore) sampling_result = self.ga_sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) bbox_anchors = torch.zeros_like(squares) bbox_gts = torch.zeros_like(squares) bbox_weights = torch.zeros_like(squares) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes bbox_weights[pos_inds, :] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_squares.size(0) bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags) bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds, sampling_result) def ga_shape_targets(self, approx_list: List[List[Tensor]], inside_flag_list: List[List[Tensor]], square_list: List[List[Tensor]], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True) -> tuple: """Compute guided anchoring targets. Args: approx_list (list[list[Tensor]]): Multi level approxs of each image. inside_flag_list (list[list[Tensor]]): Multi level inside flags of each image. square_list (list[list[Tensor]]): Multi level squares of each image. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): unmap outputs or not. Defaults to None. Returns: tuple: Returns a tuple containing shape targets. 
""" num_imgs = len(batch_img_metas) assert len(approx_list) == len(inside_flag_list) == len( square_list) == num_imgs # anchor number of multi levels num_level_squares = [squares.size(0) for squares in square_list[0]] # concat all level anchors and flags to a single tensor inside_flag_flat_list = [] approx_flat_list = [] square_flat_list = [] for i in range(num_imgs): assert len(square_list[i]) == len(inside_flag_list[i]) inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) approx_flat_list.append(torch.cat(approx_list[i])) square_flat_list.append(torch.cat(square_list[i])) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None for _ in range(num_imgs)] (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._ga_shape_target_single, approx_flat_list, inside_flag_flat_list, square_flat_list, batch_gt_instances, batch_gt_instances_ignore, batch_img_metas, unmap_outputs=unmap_outputs) # sampled anchors of all images avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. multiple levels bbox_anchors_list = images_to_levels(all_bbox_anchors, num_level_squares) bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_squares) return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, avg_factor) def loss_shape_single(self, shape_pred: Tensor, bbox_anchors: Tensor, bbox_gts: Tensor, anchor_weights: Tensor, avg_factor: int) -> Tensor: """Compute shape loss in single level.""" shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2) bbox_anchors = bbox_anchors.contiguous().view(-1, 4) bbox_gts = bbox_gts.contiguous().view(-1, 4) anchor_weights = anchor_weights.contiguous().view(-1, 4) bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0) bbox_deltas[:, 2:] += shape_pred # filter out negative samples to speed-up weighted_bounded_iou_loss inds = torch.nonzero( anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1) bbox_deltas_ = bbox_deltas[inds] bbox_anchors_ = bbox_anchors[inds] bbox_gts_ = bbox_gts[inds] anchor_weights_ = anchor_weights[inds] pred_anchors_ = self.anchor_coder.decode( bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6) loss_shape = self.loss_shape( pred_anchors_, bbox_gts_, anchor_weights_, avg_factor=avg_factor) return loss_shape def loss_loc_single(self, loc_pred: Tensor, loc_target: Tensor, loc_weight: Tensor, avg_factor: float) -> Tensor: """Compute location loss in single level.""" loss_loc = self.loss_loc( loc_pred.reshape(-1, 1), loc_target.reshape(-1).long(), loc_weight.reshape(-1), avg_factor=avg_factor) return loss_loc def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], shape_preds: List[Tensor], loc_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). shape_preds (list[Tensor]): shape predictions for each scale level with shape (N, 1, H, W). loc_preds (list[Tensor]): location predictions for each scale level with shape (N, num_anchors * 2, H, W). 
batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.approx_anchor_generator.num_levels device = cls_scores[0].device # get loc targets loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets( batch_gt_instances, featmap_sizes) # get sampled approxes approxs_list, inside_flag_list = self.get_sampled_approxs( featmap_sizes, batch_img_metas, device=device) # get squares and guided anchors squares_list, guided_anchors_list, _ = self.get_anchors( featmap_sizes, shape_preds, loc_preds, batch_img_metas, device=device) # get shape targets shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list, squares_list, batch_gt_instances, batch_img_metas) (bbox_anchors_list, bbox_gts_list, anchor_weights_list, ga_avg_factor) = shape_targets # get anchor targets cls_reg_targets = self.get_targets( guided_anchors_list, inside_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets # anchor number of multi levels num_level_anchors = [ anchors.size(0) for anchors in guided_anchors_list[0] ] # concat all level anchors to a single tensor concat_anchor_list = [] for i in range(len(guided_anchors_list)): concat_anchor_list.append(torch.cat(guided_anchors_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) # get classification and bbox regression losses losses_cls, losses_bbox = multi_apply( self.loss_by_feat_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor=avg_factor) # get anchor location loss losses_loc = [] for i in range(len(loc_preds)): loss_loc = self.loss_loc_single( loc_preds[i], loc_targets[i], loc_weights[i], avg_factor=loc_avg_factor) losses_loc.append(loss_loc) # get anchor shape loss losses_shape = [] for i in range(len(shape_preds)): loss_shape = self.loss_shape_single( shape_preds[i], bbox_anchors_list[i], bbox_gts_list[i], anchor_weights_list[i], avg_factor=ga_avg_factor) losses_shape.append(loss_shape) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_shape=losses_shape, loss_loc=losses_loc) def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], shape_preds: List[Tensor], loc_preds: List[Tensor], batch_img_metas: List[dict], cfg: OptConfigType = None, rescale: bool = False) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). shape_preds (list[Tensor]): shape predictions for each scale level with shape (N, 1, H, W). loc_preds (list[Tensor]): location predictions for each scale level with shape (N, num_anchors * 2, H, W). 
batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len( loc_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device # get guided anchors _, guided_anchors, loc_masks = self.get_anchors( featmap_sizes, shape_preds, loc_preds, batch_img_metas, use_loc_filter=not self.training, device=device) result_list = [] for img_id in range(len(batch_img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] guided_anchor_list = [ guided_anchors[img_id][i].detach() for i in range(num_levels) ] loc_mask_list = [ loc_masks[img_id][i].detach() for i in range(num_levels) ] proposals = self._predict_by_feat_single( cls_scores=cls_score_list, bbox_preds=bbox_pred_list, mlvl_anchors=guided_anchor_list, mlvl_masks=loc_mask_list, img_meta=batch_img_metas[img_id], cfg=cfg, rescale=rescale) result_list.append(proposals) return result_list def _predict_by_feat_single(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], mlvl_anchors: List[Tensor], mlvl_masks: List[Tensor], img_meta: dict, cfg: ConfigType, rescale: bool = False) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_scores (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). mlvl_anchors (list[Tensor]): Each element in the list is the anchors of a single level in feature pyramid. it has shape (num_priors, 4). mlvl_masks (list[Tensor]): Each element in the list is location masks of a single level. img_meta (dict): Image meta info. cfg (:obj:`ConfigDict` or dict): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) mlvl_bbox_preds = [] mlvl_valid_anchors = [] mlvl_scores = [] for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds, mlvl_anchors, mlvl_masks): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] # if no location is kept, end. 
if mask.sum() == 0: continue # reshape scores and bbox_pred cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) # filter scores, bbox_pred w.r.t. mask. # anchors are filtered in get_anchors() beforehand. scores = scores[mask, :] bbox_pred = bbox_pred[mask, :] if scores.dim() == 0: anchors = anchors.unsqueeze(0) scores = scores.unsqueeze(0) bbox_pred = bbox_pred.unsqueeze(0) # filter anchors, bbox_pred, scores w.r.t. scores nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class max_scores, _ = scores[:, :-1].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] mlvl_bbox_preds.append(bbox_pred) mlvl_valid_anchors.append(anchors) mlvl_scores.append(scores) mlvl_bbox_preds = torch.cat(mlvl_bbox_preds) mlvl_anchors = torch.cat(mlvl_valid_anchors) mlvl_scores = torch.cat(mlvl_scores) mlvl_bboxes = self.bbox_coder.decode( mlvl_anchors, mlvl_bbox_preds, max_shape=img_meta['img_shape']) if rescale: assert img_meta.get('scale_factor') is not None mlvl_bboxes /= mlvl_bboxes.new_tensor( img_meta['scale_factor']).repeat((1, 2)) if self.use_sigmoid_cls: # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) # multi class NMS det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) results = InstanceData() results.bboxes = det_bboxes[:, :-1] results.scores = det_bboxes[:, -1] results.labels = det_labels return results
44,103
43.325628
79
py
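A small self-contained sketch of how GuidedAnchorHead._get_guided_anchors_single above turns square anchors into guided anchors: only (dw, dh) are predicted, so each square's centre is kept and only its width and height are adapted. The decoding below re-implements the delta transform for the default target_means=[0, 0, 0, 0] and target_stds=[1, 1, 1, 1] shown in the head's anchor_coder config; the input numbers are hypothetical.

import torch

def decode_wh_only(squares: torch.Tensor, wh_deltas: torch.Tensor) -> torch.Tensor:
    # Mirrors bbox_deltas[:, 2:] = anchor_deltas with dx = dy = 0.
    w = squares[:, 2] - squares[:, 0]
    h = squares[:, 3] - squares[:, 1]
    cx = (squares[:, 0] + squares[:, 2]) * 0.5
    cy = (squares[:, 1] + squares[:, 3]) * 0.5
    new_w = w * wh_deltas[:, 0].exp()
    new_h = h * wh_deltas[:, 1].exp()
    return torch.stack(
        [cx - new_w * 0.5, cy - new_h * 0.5,
         cx + new_w * 0.5, cy + new_h * 0.5], dim=-1)

squares = torch.tensor([[0., 0., 32., 32.], [16., 16., 48., 48.]])
wh_deltas = torch.tensor([[0.5, -0.5], [0.0, 1.0]])  # hypothetical shape_pred output
print(decode_wh_only(squares, wh_deltas))  # centres unchanged, widths/heights rescaled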
ERD
ERD-main/mmdet/models/dense_heads/yolof_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch import torch.nn as nn from mmcv.cnn import ConvModule, is_norm from mmengine.model import bias_init_with_prob, constant_init, normal_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean from ..task_modules.prior_generators import anchor_inside_flags from ..utils import levels_to_images, multi_apply, unmap from .anchor_head import AnchorHead INF = 1e8 @MODELS.register_module() class YOLOFHead(AnchorHead): """Detection Head of `YOLOF <https://arxiv.org/abs/2103.09460>`_ Args: num_classes (int): The number of object classes (w/o background) in_channels (list[int]): The number of input channels per scale. cls_num_convs (int): The number of convolutions of cls branch. Defaults to 2. reg_num_convs (int): The number of convolutions of reg branch. Defaults to 4. norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization layer. Defaults to ``dict(type='BN', requires_grad=True)``. """ def __init__(self, num_classes: int, in_channels: List[int], num_cls_convs: int = 2, num_reg_convs: int = 4, norm_cfg: ConfigType = dict(type='BN', requires_grad=True), **kwargs) -> None: self.num_cls_convs = num_cls_convs self.num_reg_convs = num_reg_convs self.norm_cfg = norm_cfg super().__init__( num_classes=num_classes, in_channels=in_channels, **kwargs) def _init_layers(self) -> None: cls_subnet = [] bbox_subnet = [] for i in range(self.num_cls_convs): cls_subnet.append( ConvModule( self.in_channels, self.in_channels, kernel_size=3, padding=1, norm_cfg=self.norm_cfg)) for i in range(self.num_reg_convs): bbox_subnet.append( ConvModule( self.in_channels, self.in_channels, kernel_size=3, padding=1, norm_cfg=self.norm_cfg)) self.cls_subnet = nn.Sequential(*cls_subnet) self.bbox_subnet = nn.Sequential(*bbox_subnet) self.cls_score = nn.Conv2d( self.in_channels, self.num_base_priors * self.num_classes, kernel_size=3, stride=1, padding=1) self.bbox_pred = nn.Conv2d( self.in_channels, self.num_base_priors * 4, kernel_size=3, stride=1, padding=1) self.object_pred = nn.Conv2d( self.in_channels, self.num_base_priors, kernel_size=3, stride=1, padding=1) def init_weights(self) -> None: for m in self.modules(): if isinstance(m, nn.Conv2d): normal_init(m, mean=0, std=0.01) if is_norm(m): constant_init(m, 1) # Use prior in model initialization to improve stability bias_cls = bias_init_with_prob(0.01) torch.nn.init.constant_(self.cls_score.bias, bias_cls) def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. Returns: tuple: normalized_cls_score (Tensor): Normalized Cls scores for a \ single scale level, the channels number is \ num_base_priors * num_classes. bbox_reg (Tensor): Box energies / deltas for a single scale \ level, the channels number is num_base_priors * 4. """ cls_score = self.cls_score(self.cls_subnet(x)) N, _, H, W = cls_score.shape cls_score = cls_score.view(N, -1, self.num_classes, H, W) reg_feat = self.bbox_subnet(x) bbox_reg = self.bbox_pred(reg_feat) objectness = self.object_pred(reg_feat) # implicit objectness objectness = objectness.view(N, -1, 1, H, W) normalized_cls_score = cls_score + objectness - torch.log( 1. 
+ torch.clamp(cls_score.exp(), max=INF) + torch.clamp(objectness.exp(), max=INF)) normalized_cls_score = normalized_cls_score.view(N, -1, H, W) return normalized_cls_score, bbox_reg def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. """ assert len(cls_scores) == 1 assert self.prior_generator.num_levels == 1 device = cls_scores[0].device featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) # The output level is always 1 anchor_list = [anchors[0] for anchors in anchor_list] valid_flag_list = [valid_flags[0] for valid_flags in valid_flag_list] cls_scores_list = levels_to_images(cls_scores) bbox_preds_list = levels_to_images(bbox_preds) cls_reg_targets = self.get_targets( cls_scores_list, bbox_preds_list, anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) if cls_reg_targets is None: return None (batch_labels, batch_label_weights, avg_factor, batch_bbox_weights, batch_pos_predicted_boxes, batch_target_boxes) = cls_reg_targets flatten_labels = batch_labels.reshape(-1) batch_label_weights = batch_label_weights.reshape(-1) cls_score = cls_scores[0].permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() # classification loss loss_cls = self.loss_cls( cls_score, flatten_labels, batch_label_weights, avg_factor=avg_factor) # regression loss if batch_pos_predicted_boxes.shape[0] == 0: # no pos sample loss_bbox = batch_pos_predicted_boxes.sum() * 0 else: loss_bbox = self.loss_bbox( batch_pos_predicted_boxes, batch_target_boxes, batch_bbox_weights.float(), avg_factor=avg_factor) return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) def get_targets(self, cls_scores_list: List[Tensor], bbox_preds_list: List[Tensor], anchor_list: List[Tensor], valid_flag_list: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs: bool = True): """Compute regression and classification targets for anchors in multiple images. Args: cls_scores_list (list[Tensor]): Classification scores of each image. each is a 4D-tensor, the shape is (h * w, num_anchors * num_classes). bbox_preds_list (list[Tensor]): Bbox preds of each image. each is a 4D-tensor, the shape is (h * w, num_anchors * 4). anchor_list (list[Tensor]): Anchors of each image. Each element of is a tensor of shape (h * w * num_anchors, 4). valid_flag_list (list[Tensor]): Valid flags of each image. 
Each element of is a tensor of shape (h * w * num_anchors, ) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: Usually returns a tuple containing learning targets. - batch_labels (Tensor): Label of all images. Each element \ of is a tensor of shape (batch, h * w * num_anchors) - batch_label_weights (Tensor): Label weights of all images \ of is a tensor of shape (batch, h * w * num_anchors) - num_total_pos (int): Number of positive samples in all \ images. - num_total_neg (int): Number of negative samples in all \ images. additional_returns: This function enables user-defined returns from `self._get_targets_single`. These returns are currently refined to properties at each feature map (i.e. having HxW dimension). The results will be concatenated after the end """ num_imgs = len(batch_img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None] * num_imgs results = multi_apply( self._get_targets_single, bbox_preds_list, anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) (all_labels, all_label_weights, pos_inds, neg_inds, sampling_results_list) = results[:5] # Get `avg_factor` of all images, which calculate in `SamplingResult`. # When using sampling method, avg_factor is usually the sum of # positive and negative priors. When using `PseudoSampler`, # `avg_factor` is usually equal to the number of positive priors. avg_factor = sum( [results.avg_factor for results in sampling_results_list]) rest_results = list(results[5:]) # user-added return values batch_labels = torch.stack(all_labels, 0) batch_label_weights = torch.stack(all_label_weights, 0) res = (batch_labels, batch_label_weights, avg_factor) for i, rests in enumerate(rest_results): # user-added return values rest_results[i] = torch.cat(rests, 0) return res + tuple(rest_results) def _get_targets_single(self, bbox_preds: Tensor, flat_anchors: Tensor, valid_flags: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression and classification targets for anchors in a single image. Args: bbox_preds (Tensor): Bbox prediction of the image, which shape is (h * w ,4) flat_anchors (Tensor): Anchors of the image, which shape is (h * w * num_anchors ,4) valid_flags (Tensor): Valid flags of the image, which shape is (h * w * num_anchors,). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: labels (Tensor): Labels of image, which shape is (h * w * num_anchors, ). 
label_weights (Tensor): Label weights of image, which shape is (h * w * num_anchors, ). pos_inds (Tensor): Pos index of image. neg_inds (Tensor): Neg index of image. sampling_result (obj:`SamplingResult`): Sampling result. pos_bbox_weights (Tensor): The Weight of using to calculate the bbox branch loss, which shape is (num, ). pos_predicted_boxes (Tensor): boxes predicted value of using to calculate the bbox branch loss, which shape is (num, 4). pos_target_boxes (Tensor): boxes target value of using to calculate the bbox branch loss, which shape is (num, 4). """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg['allowed_border']) if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] bbox_preds = bbox_preds.reshape(-1, 4) bbox_preds = bbox_preds[inside_flags, :] # decoded bbox decoder_bbox_preds = self.bbox_coder.decode(anchors, bbox_preds) pred_instances = InstanceData( priors=anchors, decoder_priors=decoder_bbox_preds) assign_result = self.assigner.assign(pred_instances, gt_instances, gt_instances_ignore) pos_bbox_weights = assign_result.get_extra_property('pos_idx') pos_predicted_boxes = assign_result.get_extra_property( 'pos_predicted_boxes') pos_target_boxes = assign_result.get_extra_property('target_boxes') sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_anchors = anchors.shape[0] labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) # fill bg label label_weights = unmap(label_weights, num_total_anchors, inside_flags) return (labels, label_weights, pos_inds, neg_inds, sampling_result, pos_bbox_weights, pos_predicted_boxes, pos_target_boxes)
17,384
42.4625
79
py
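A short sketch of the "implicit objectness" normalisation in YOLOFHead.forward_single above. After subtracting log(1 + exp(cls) + exp(obj)), the sigmoid of the normalised score equals the product of the classification and objectness sigmoids, which is the property the head relies on; the tensor shapes below are illustrative only.

import torch

INF = 1e8
cls_score = torch.randn(2, 5, 20, 7, 7)   # (N, num_priors, num_classes, H, W)
objectness = torch.randn(2, 5, 1, 7, 7)   # (N, num_priors, 1, H, W)

normalized = cls_score + objectness - torch.log(
    1. + torch.clamp(cls_score.exp(), max=INF) +
    torch.clamp(objectness.exp(), max=INF))

# sigmoid(normalized) == sigmoid(cls_score) * sigmoid(objectness)
assert torch.allclose(
    normalized.sigmoid(), cls_score.sigmoid() * objectness.sigmoid(), atol=1e-5)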
ERD
ERD-main/mmdet/models/dense_heads/sabl_retina_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList) from ..task_modules.samplers import PseudoSampler from ..utils import (filter_scores_and_topk, images_to_levels, multi_apply, unmap) from .base_dense_head import BaseDenseHead from .guided_anchor_head import GuidedAnchorHead @MODELS.register_module() class SABLRetinaHead(BaseDenseHead): """Side-Aware Boundary Localization (SABL) for RetinaNet. The anchor generation, assigning and sampling in SABLRetinaHead are the same as GuidedAnchorHead for guided anchoring. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of Convs for classification and regression branches. Defaults to 4. feat_channels (int): Number of hidden channels. Defaults to 256. approx_anchor_generator (:obj:`ConfigType` or dict): Config dict for approx generator. square_anchor_generator (:obj:`ConfigDict` or dict): Config dict for square generator. conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for ConvModule. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for Norm Layer. Defaults to None. bbox_coder (:obj:`ConfigDict` or dict): Config dict for bbox coder. reg_decoded_bbox (bool): If true, the regression loss would be applied directly on decoded bounding boxes, converting both the predicted boxes and regression targets to absolute coordinates format. Default False. It should be ``True`` when using ``IoULoss``, ``GIoULoss``, or ``DIoULoss`` in the bbox head. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of SABLRetinaHead. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of SABLRetinaHead. loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. loss_bbox_cls (:obj:`ConfigDict` or dict): Config of classification loss for bbox branch. loss_bbox_reg (:obj:`ConfigDict` or dict): Config of regression loss for bbox branch. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. 
""" def __init__( self, num_classes: int, in_channels: int, stacked_convs: int = 4, feat_channels: int = 256, approx_anchor_generator: ConfigType = dict( type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), square_anchor_generator: ConfigType = dict( type='AnchorGenerator', ratios=[1.0], scales=[4], strides=[8, 16, 32, 64, 128]), conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, bbox_coder: ConfigType = dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), reg_decoded_bbox: bool = False, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, loss_cls: ConfigType = dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), loss_bbox_reg: ConfigType = dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5), init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='retina_cls', std=0.01, bias_prob=0.01)) ) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.num_buckets = bbox_coder['num_buckets'] self.side_num = int(np.ceil(self.num_buckets / 2)) assert (approx_anchor_generator['octave_base_scale'] == square_anchor_generator['scales'][0]) assert (approx_anchor_generator['strides'] == square_anchor_generator['strides']) self.approx_anchor_generator = TASK_UTILS.build( approx_anchor_generator) self.square_anchor_generator = TASK_UTILS.build( square_anchor_generator) self.approxs_per_octave = ( self.approx_anchor_generator.num_base_priors[0]) # one anchor per location self.num_base_priors = self.square_anchor_generator.num_base_priors[0] self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_decoded_bbox = reg_decoded_bbox self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.bbox_coder = TASK_UTILS.build(bbox_coder) self.loss_cls = MODELS.build(loss_cls) self.loss_bbox_cls = MODELS.build(loss_bbox_cls) self.loss_bbox_reg = MODELS.build(loss_bbox_reg) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) # use PseudoSampler when sampling is False if 'sampler' in self.train_cfg: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler(context=self) self._init_layers() def _init_layers(self) -> None: self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.retina_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.retina_bbox_reg = nn.Conv2d( self.feat_channels, self.side_num * 4, 3, padding=1) self.retina_bbox_cls = nn.Conv2d( self.feat_channels, self.side_num * 4, 3, padding=1) def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = 
cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_cls_pred = self.retina_bbox_cls(reg_feat) bbox_reg_pred = self.retina_bbox_reg(reg_feat) bbox_pred = (bbox_cls_pred, bbox_reg_pred) return cls_score, bbox_pred def forward(self, feats: List[Tensor]) -> Tuple[List[Tensor]]: return multi_apply(self.forward_single, feats) def get_anchors( self, featmap_sizes: List[tuple], img_metas: List[dict], device: Union[torch.device, str] = 'cuda' ) -> Tuple[List[List[Tensor]], List[List[Tensor]]]: """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): device for returned tensors Returns: tuple: square approxs of each image """ num_imgs = len(img_metas) # since feature map sizes of all images are the same, we only compute # squares for one time multi_level_squares = self.square_anchor_generator.grid_priors( featmap_sizes, device=device) squares_list = [multi_level_squares for _ in range(num_imgs)] return squares_list def get_targets(self, approx_list: List[List[Tensor]], inside_flag_list: List[List[Tensor]], square_list: List[List[Tensor]], batch_gt_instances: InstanceList, batch_img_metas, batch_gt_instances_ignore: OptInstanceList = None, unmap_outputs=True) -> tuple: """Compute bucketing targets. Args: approx_list (list[list[Tensor]]): Multi level approxs of each image. inside_flag_list (list[list[Tensor]]): Multi level inside flags of each image. square_list (list[list[Tensor]]): Multi level squares of each image. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: Returns a tuple containing learning targets. - labels_list (list[Tensor]): Labels of each level. - label_weights_list (list[Tensor]): Label weights of each level. - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \ each level. - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \ each level. - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \ each level. - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \ each level. - num_total_pos (int): Number of positive samples in all images. - num_total_neg (int): Number of negative samples in all images. 
""" num_imgs = len(batch_img_metas) assert len(approx_list) == len(inside_flag_list) == len( square_list) == num_imgs # anchor number of multi levels num_level_squares = [squares.size(0) for squares in square_list[0]] # concat all level anchors and flags to a single tensor inside_flag_flat_list = [] approx_flat_list = [] square_flat_list = [] for i in range(num_imgs): assert len(square_list[i]) == len(inside_flag_list[i]) inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) approx_flat_list.append(torch.cat(approx_list[i])) square_flat_list.append(torch.cat(square_list[i])) # compute targets for each image if batch_gt_instances_ignore is None: batch_gt_instances_ignore = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_cls_targets, all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights, pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( self._get_targets_single, approx_flat_list, inside_flag_flat_list, square_flat_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore, unmap_outputs=unmap_outputs) # sampled anchors of all images avg_factor = sum( [results.avg_factor for results in sampling_results_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_squares) label_weights_list = images_to_levels(all_label_weights, num_level_squares) bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets, num_level_squares) bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights, num_level_squares) bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets, num_level_squares) bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights, num_level_squares) return (labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, avg_factor) def _get_targets_single(self, flat_approxs: Tensor, inside_flags: Tensor, flat_squares: Tensor, gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression and classification targets for anchors in a single image. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Defaults to True. Returns: tuple: - labels_list (Tensor): Labels in a single image. - label_weights (Tensor): Label weights in a single image. - bbox_cls_targets (Tensor): BBox cls targets in a single image. - bbox_cls_weights (Tensor): BBox cls weights in a single image. - bbox_reg_targets (Tensor): BBox reg targets in a single image. - bbox_reg_weights (Tensor): BBox reg weights in a single image. - num_total_pos (int): Number of positive samples in a single \ image. - num_total_neg (int): Number of negative samples in a single \ image. - sampling_result (:obj:`SamplingResult`): Sampling result object. 
""" if not inside_flags.any(): raise ValueError( 'There is no valid anchor inside the image boundary. Please ' 'check the image size and anchor sizes, or set ' '``allowed_border`` to -1 to skip the condition.') # assign gt and sample anchors num_square = flat_squares.size(0) approxs = flat_approxs.view(num_square, self.approxs_per_octave, 4) approxs = approxs[inside_flags, ...] squares = flat_squares[inside_flags, :] pred_instances = InstanceData() pred_instances.priors = squares pred_instances.approxs = approxs assign_result = self.assigner.assign(pred_instances, gt_instances, gt_instances_ignore) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_squares = squares.shape[0] bbox_cls_targets = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_cls_weights = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_reg_targets = squares.new_zeros( (num_valid_squares, self.side_num * 4)) bbox_reg_weights = squares.new_zeros( (num_valid_squares, self.side_num * 4)) labels = squares.new_full((num_valid_squares, ), self.num_classes, dtype=torch.long) label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets, pos_bbox_cls_weights) = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights labels[pos_inds] = sampling_result.pos_gt_labels if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_squares.size(0) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors, inside_flags) bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors, inside_flags) bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors, inside_flags) bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds, sampling_result) def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, labels: Tensor, label_weights: Tensor, bbox_cls_targets: Tensor, bbox_cls_weights: Tensor, bbox_reg_targets: Tensor, bbox_reg_weights: Tensor, avg_factor: float) -> Tuple[Tensor]: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: cls_score (Tensor): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). labels (Tensor): Labels in a single image. label_weights (Tensor): Label weights in a single level. bbox_cls_targets (Tensor): BBox cls targets in a single level. bbox_cls_weights (Tensor): BBox cls weights in a single level. bbox_reg_targets (Tensor): BBox reg targets in a single level. bbox_reg_weights (Tensor): BBox reg weights in a single level. 
avg_factor (int): Average factor that is used to average the loss. Returns: tuple: loss components. """ # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor) # regression loss bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4) bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4) bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4) bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4) (bbox_cls_pred, bbox_reg_pred) = bbox_pred bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape( -1, self.side_num * 4) bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape( -1, self.side_num * 4) loss_bbox_cls = self.loss_bbox_cls( bbox_cls_pred, bbox_cls_targets.long(), bbox_cls_weights, avg_factor=avg_factor * 4 * self.side_num) loss_bbox_reg = self.loss_bbox_reg( bbox_reg_pred, bbox_reg_targets, bbox_reg_weights, avg_factor=avg_factor * 4 * self.bbox_coder.offset_topk) return loss_cls, loss_bbox_cls, loss_bbox_reg def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.approx_anchor_generator.num_levels device = cls_scores[0].device # get sampled approxes approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs( self, featmap_sizes, batch_img_metas, device=device) square_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( approxs_list, inside_flag_list, square_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, avg_factor) = cls_reg_targets losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply( self.loss_by_feat_single, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_cls_targets_list, bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, avg_factor=avg_factor) return dict( loss_cls=losses_cls, loss_bbox_cls=losses_bbox_cls, loss_bbox_reg=losses_bbox_reg) def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_img_metas: List[dict], cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. 
Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. cfg (:obj:`ConfigDict`, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device mlvl_anchors = self.get_anchors( featmap_sizes, batch_img_metas, device=device) result_list = [] for img_id in range(len(batch_img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_cls_pred_list = [ bbox_preds[i][0][img_id].detach() for i in range(num_levels) ] bbox_reg_pred_list = [ bbox_preds[i][1][img_id].detach() for i in range(num_levels) ] proposals = self._predict_by_feat_single( cls_scores=cls_score_list, bbox_cls_preds=bbox_cls_pred_list, bbox_reg_preds=bbox_reg_pred_list, mlvl_anchors=mlvl_anchors[img_id], img_meta=batch_img_metas[img_id], cfg=cfg, rescale=rescale, with_nms=with_nms) result_list.append(proposals) return result_list def _predict_by_feat_single(self, cls_scores: List[Tensor], bbox_cls_preds: List[Tensor], bbox_reg_preds: List[Tensor], mlvl_anchors: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True) -> InstanceData: cfg = self.test_cfg if cfg is None else cfg nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_confids = [] mlvl_labels = [] assert len(cls_scores) == len(bbox_cls_preds) == len( bbox_reg_preds) == len(mlvl_anchors) for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip( cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors): assert cls_score.size()[-2:] == bbox_cls_pred.size( )[-2:] == bbox_reg_pred.size()[-2::] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1)[:, :-1] bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape( -1, self.side_num * 4) bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape( -1, self.side_num * 4) # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. 
results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict( anchors=anchors, bbox_cls_pred=bbox_cls_pred, bbox_reg_pred=bbox_reg_pred)) scores, labels, _, filtered_results = results anchors = filtered_results['anchors'] bbox_cls_pred = filtered_results['bbox_cls_pred'] bbox_reg_pred = filtered_results['bbox_reg_pred'] bbox_preds = [ bbox_cls_pred.contiguous(), bbox_reg_pred.contiguous() ] bboxes, confids = self.bbox_coder.decode( anchors.contiguous(), bbox_preds, max_shape=img_meta['img_shape']) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_confids.append(confids) mlvl_labels.append(labels) results = InstanceData() results.bboxes = torch.cat(mlvl_bboxes) results.scores = torch.cat(mlvl_scores) results.score_factors = torch.cat(mlvl_confids) results.labels = torch.cat(mlvl_labels) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta)
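The SABLRetinaHead code above splits box prediction into a bucket-classification branch and a bucket-regression branch, each emitting side_num * 4 channels per location. A minimal shape sketch in plain PyTorch (num_classes=80, feat_channels=256 and sigmoid classification are assumed values, not taken from any particular config):

import math
import torch
import torch.nn as nn

num_classes, feat_channels, num_buckets = 80, 256, 14     # assumed values
side_num = int(math.ceil(num_buckets / 2))                # 7 buckets per side

retina_cls = nn.Conv2d(feat_channels, num_classes, 3, padding=1)
retina_bbox_cls = nn.Conv2d(feat_channels, side_num * 4, 3, padding=1)
retina_bbox_reg = nn.Conv2d(feat_channels, side_num * 4, 3, padding=1)

feat = torch.rand(1, feat_channels, 32, 32)               # one FPN level
cls_score = retina_cls(feat)                              # (1, 80, 32, 32)
bbox_pred = (retina_bbox_cls(feat), retina_bbox_reg(feat))
print(cls_score.shape, bbox_pred[0].shape, bbox_pred[1].shape)
# torch.Size([1, 80, 32, 32]) torch.Size([1, 28, 32, 32]) torch.Size([1, 28, 32, 32])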
31,550
43.626591
79
py
ERD
ERD-main/mmdet/models/dense_heads/fovea_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import DeformConv2d from mmengine.config import ConfigDict from mmengine.model import BaseModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig from ..utils import filter_scores_and_topk, multi_apply from .anchor_free_head import AnchorFreeHead INF = 1e8 class FeatureAlign(BaseModule): """Feature Align Module. Feature Align Module is implemented based on DCN v1. It uses anchor shape prediction rather than feature map to predict offsets of deform conv layer. Args: in_channels (int): Number of channels in the input feature map. out_channels (int): Number of channels in the output feature map. kernel_size (int): Size of the convolution kernel. ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``. deform_groups: (int): Group number of DCN in FeatureAdaption module. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. """ def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, deform_groups: int = 4, init_cfg: OptMultiConfig = dict( type='Normal', layer='Conv2d', std=0.1, override=dict(type='Normal', name='conv_adaption', std=0.01)) ) -> None: super().__init__(init_cfg=init_cfg) offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 4, deform_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv2d( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deform_groups=deform_groups) self.relu = nn.ReLU(inplace=True) def forward(self, x: Tensor, shape: Tensor) -> Tensor: """Forward function of feature align module. Args: x (Tensor): Features from the upstream network. shape (Tensor): Exponential of bbox predictions. Returns: x (Tensor): The aligned features. """ offset = self.conv_offset(shape) x = self.relu(self.conv_adaption(x, offset)) return x @MODELS.register_module() class FoveaHead(AnchorFreeHead): """Detection Head of `FoveaBox: Beyond Anchor-based Object Detector. <https://arxiv.org/abs/1904.03797>`_. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. base_edge_list (list[int]): List of edges. scale_ranges (list[tuple]): Range of scales. sigma (float): Super parameter of ``FoveaHead``. with_deform (bool): Whether use deform conv. deform_groups (int): Deformable conv group size. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. 
""" def __init__(self, num_classes: int, in_channels: int, base_edge_list: List[int] = (16, 32, 64, 128, 256), scale_ranges: List[tuple] = ((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), sigma: float = 0.4, with_deform: bool = False, deform_groups: int = 4, init_cfg: OptMultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)), **kwargs) -> None: self.base_edge_list = base_edge_list self.scale_ranges = scale_ranges self.sigma = sigma self.with_deform = with_deform self.deform_groups = deform_groups super().__init__( num_classes=num_classes, in_channels=in_channels, init_cfg=init_cfg, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" # box branch super()._init_reg_convs() self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) # cls branch if not self.with_deform: super()._init_cls_convs() self.conv_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) else: self.cls_convs = nn.ModuleList() self.cls_convs.append( ConvModule( self.feat_channels, (self.feat_channels * 4), 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.cls_convs.append( ConvModule((self.feat_channels * 4), (self.feat_channels * 4), 1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.feature_adaption = FeatureAlign( self.feat_channels, self.feat_channels, kernel_size=3, deform_groups=self.deform_groups) self.conv_cls = nn.Conv2d( int(self.feat_channels * 4), self.cls_out_channels, 3, padding=1) def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. Returns: tuple: scores for each class and bbox predictions of input feature maps. """ cls_feat = x reg_feat = x for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.conv_reg(reg_feat) if self.with_deform: cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp()) for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.conv_cls(cls_feat) return cls_score, bbox_pred def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_priors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_priors * 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] priors = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) num_imgs = cls_scores[0].size(0) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_labels, flatten_bbox_targets = self.get_targets( batch_gt_instances, featmap_sizes, priors) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes pos_inds = ((flatten_labels >= 0) & (flatten_labels < self.num_classes)).nonzero().view(-1) num_pos = len(pos_inds) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) if num_pos > 0: pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_weights = pos_bbox_targets.new_ones(pos_bbox_targets.size()) loss_bbox = self.loss_bbox( pos_bbox_preds, pos_bbox_targets, pos_weights, avg_factor=num_pos) else: loss_bbox = torch.tensor( 0, dtype=flatten_bbox_preds.dtype, device=flatten_bbox_preds.device) return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) def get_targets( self, batch_gt_instances: InstanceList, featmap_sizes: List[tuple], priors_list: List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: """Compute regression and classification for priors in multiple images. Args: batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. featmap_sizes (list[tuple]): Size tuple of feature maps. priors_list (list[Tensor]): Priors list of each fpn level, each has shape (num_priors, 2). Returns: tuple: Targets of each level. - flatten_labels (list[Tensor]): Labels of each level. - flatten_bbox_targets (list[Tensor]): BBox targets of each level. """ label_list, bbox_target_list = multi_apply( self._get_targets_single, batch_gt_instances, featmap_size_list=featmap_sizes, priors_list=priors_list) flatten_labels = [ torch.cat([ labels_level_img.flatten() for labels_level_img in labels_level ]) for labels_level in zip(*label_list) ] flatten_bbox_targets = [ torch.cat([ bbox_targets_level_img.reshape(-1, 4) for bbox_targets_level_img in bbox_targets_level ]) for bbox_targets_level in zip(*bbox_target_list) ] flatten_labels = torch.cat(flatten_labels) flatten_bbox_targets = torch.cat(flatten_bbox_targets) return flatten_labels, flatten_bbox_targets def _get_targets_single(self, gt_instances: InstanceData, featmap_size_list: List[tuple] = None, priors_list: List[Tensor] = None) -> tuple: """Compute regression and classification targets for a single image. Args: gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. featmap_size_list (list[tuple]): Size tuple of feature maps. priors_list (list[Tensor]): Priors of each fpn level, each has shape (num_priors, 2). Returns: tuple: - label_list (list[Tensor]): Labels of all anchors in the image. - box_target_list (list[Tensor]): BBox targets of all anchors in the image. 
""" gt_bboxes_raw = gt_instances.bboxes gt_labels_raw = gt_instances.labels gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1])) label_list = [] bbox_target_list = [] # for each pyramid, find the cls and box target for base_len, (lower_bound, upper_bound), stride, featmap_size, \ priors in zip(self.base_edge_list, self.scale_ranges, self.strides, featmap_size_list, priors_list): # FG cat_id: [0, num_classes -1], BG cat_id: num_classes priors = priors.view(*featmap_size, 2) x, y = priors[..., 0], priors[..., 1] labels = gt_labels_raw.new_full(featmap_size, self.num_classes) bbox_targets = gt_bboxes_raw.new_ones(featmap_size[0], featmap_size[1], 4) # scale assignment hit_indices = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(hit_indices) == 0: label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) continue _, hit_index_order = torch.sort(-gt_areas[hit_indices]) hit_indices = hit_indices[hit_index_order] gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride gt_labels = gt_labels_raw[hit_indices] half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0]) half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1]) # valid fovea area: left, right, top, down pos_left = torch.ceil( gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long(). \ clamp(0, featmap_size[1] - 1) pos_right = torch.floor( gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long(). \ clamp(0, featmap_size[1] - 1) pos_top = torch.ceil( gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long(). \ clamp(0, featmap_size[0] - 1) pos_down = torch.floor( gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long(). \ clamp(0, featmap_size[0] - 1) for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \ zip(pos_left, pos_top, pos_right, pos_down, gt_labels, gt_bboxes_raw[hit_indices, :]): labels[py1:py2 + 1, px1:px2 + 1] = label bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \ (x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \ (y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \ (gt_x2 - x[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \ (gt_y2 - y[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.) label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) return label_list, bbox_target_list # Same as base_dense_head/_predict_by_feat_single except self._bbox_decode def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], mlvl_priors: List[Tensor], img_meta: dict, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid, has shape (num_priors, 2). img_meta (dict): Image meta info. 
cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_score_list) == len(bbox_pred_list) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bboxes = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (cls_score, bbox_pred, stride, base_len, priors) in \ enumerate(zip(cls_score_list, bbox_pred_list, self.strides, self.base_edge_list, mlvl_priors)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. results = filter_scores_and_topk( scores, cfg.score_thr, nms_pre, dict(bbox_pred=bbox_pred, priors=priors)) scores, labels, _, filtered_results = results bbox_pred = filtered_results['bbox_pred'] priors = filtered_results['priors'] bboxes = self._bbox_decode(priors, bbox_pred, base_len, img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_labels.append(labels) results = InstanceData() results.bboxes = torch.cat(mlvl_bboxes) results.scores = torch.cat(mlvl_scores) results.labels = torch.cat(mlvl_labels) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) def _bbox_decode(self, priors: Tensor, bbox_pred: Tensor, base_len: int, max_shape: int) -> Tensor: """Function to decode bbox. Args: priors (Tensor): Center proiors of an image, has shape (num_instances, 2). bbox_preds (Tensor): Box energies / deltas for all instances, has shape (batch_size, num_instances, 4). base_len (int): The base length. max_shape (int): The max shape of bbox. Returns: Tensor: Decoded bboxes in (tl_x, tl_y, br_x, br_y) format. Has shape (batch_size, num_instances, 4). """ bbox_pred = bbox_pred.exp() y = priors[:, 1] x = priors[:, 0] x1 = (x - base_len * bbox_pred[:, 0]). \ clamp(min=0, max=max_shape[1] - 1) y1 = (y - base_len * bbox_pred[:, 1]). \ clamp(min=0, max=max_shape[0] - 1) x2 = (x + base_len * bbox_pred[:, 2]). \ clamp(min=0, max=max_shape[1] - 1) y2 = (y + base_len * bbox_pred[:, 3]). \ clamp(min=0, max=max_shape[0] - 1) decoded_bboxes = torch.stack([x1, y1, x2, y2], -1) return decoded_bboxes
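The FoveaHead._bbox_decode rule above exponentiates the predicted offsets and scales them by the per-level base edge length before clamping to the image. A small standalone restatement of that decode in plain PyTorch, with assumed example values:

import torch

def fovea_decode(priors, bbox_pred, base_len, max_shape):
    # same rule as FoveaHead._bbox_decode: offsets are exp()'d, scaled by
    # base_len and clamped to the image bounds (max_shape is (h, w))
    bbox_pred = bbox_pred.exp()
    x, y = priors[:, 0], priors[:, 1]
    x1 = (x - base_len * bbox_pred[:, 0]).clamp(min=0, max=max_shape[1] - 1)
    y1 = (y - base_len * bbox_pred[:, 1]).clamp(min=0, max=max_shape[0] - 1)
    x2 = (x + base_len * bbox_pred[:, 2]).clamp(min=0, max=max_shape[1] - 1)
    y2 = (y + base_len * bbox_pred[:, 3]).clamp(min=0, max=max_shape[0] - 1)
    return torch.stack([x1, y1, x2, y2], -1)

priors = torch.tensor([[100.0, 80.0]])                    # one prior center (x, y)
pred = torch.log(torch.tensor([[2.0, 1.0, 3.0, 1.5]]))    # raw head output
print(fovea_decode(priors, pred, base_len=16, max_shape=(480, 640)))
# tensor([[ 68.,  64., 148., 104.]]) up to float rounding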
21,448
41.056863
79
py
ERD
ERD-main/mmdet/models/dense_heads/mask2former_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from typing import List, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d from mmcv.ops import point_sample from mmengine.model import ModuleList, caffe2_xavier_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import SampleList from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig, reduce_mean from ..layers import Mask2FormerTransformerDecoder, SinePositionalEncoding from ..utils import get_uncertain_point_coords_with_randomness from .anchor_free_head import AnchorFreeHead from .maskformer_head import MaskFormerHead @MODELS.register_module() class Mask2FormerHead(MaskFormerHead): """Implements the Mask2Former head. See `Masked-attention Mask Transformer for Universal Image Segmentation <https://arxiv.org/pdf/2112.01527>`_ for details. Args: in_channels (list[int]): Number of channels in the input feature map. feat_channels (int): Number of channels for features. out_channels (int): Number of channels for output. num_things_classes (int): Number of things. num_stuff_classes (int): Number of stuff. num_queries (int): Number of query in Transformer decoder. pixel_decoder (:obj:`ConfigDict` or dict): Config for pixel decoder. Defaults to None. enforce_decoder_input_project (bool, optional): Whether to add a layer to change the embed_dim of tranformer encoder in pixel decoder to the embed_dim of transformer decoder. Defaults to False. transformer_decoder (:obj:`ConfigDict` or dict): Config for transformer decoder. Defaults to None. positional_encoding (:obj:`ConfigDict` or dict): Config for transformer decoder position encoding. Defaults to dict(num_feats=128, normalize=True). loss_cls (:obj:`ConfigDict` or dict): Config of the classification loss. Defaults to None. loss_mask (:obj:`ConfigDict` or dict): Config of the mask loss. Defaults to None. loss_dice (:obj:`ConfigDict` or dict): Config of the dice loss. Defaults to None. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of Mask2Former head. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of Mask2Former head. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. Defaults to None. 
""" def __init__(self, in_channels: List[int], feat_channels: int, out_channels: int, num_things_classes: int = 80, num_stuff_classes: int = 53, num_queries: int = 100, num_transformer_feat_level: int = 3, pixel_decoder: ConfigType = ..., enforce_decoder_input_project: bool = False, transformer_decoder: ConfigType = ..., positional_encoding: ConfigType = dict( num_feats=128, normalize=True), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0, reduction='mean', class_weight=[1.0] * 133 + [0.1]), loss_mask: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, reduction='mean', loss_weight=5.0), loss_dice: ConfigType = dict( type='DiceLoss', use_sigmoid=True, activate=True, reduction='mean', naive_dice=True, eps=1.0, loss_weight=5.0), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, init_cfg: OptMultiConfig = None, **kwargs) -> None: super(AnchorFreeHead, self).__init__(init_cfg=init_cfg) self.num_things_classes = num_things_classes self.num_stuff_classes = num_stuff_classes self.num_classes = self.num_things_classes + self.num_stuff_classes self.num_queries = num_queries self.num_transformer_feat_level = num_transformer_feat_level self.num_heads = transformer_decoder.layer_cfg.cross_attn_cfg.num_heads self.num_transformer_decoder_layers = transformer_decoder.num_layers assert pixel_decoder.encoder.layer_cfg. \ self_attn_cfg.num_levels == num_transformer_feat_level pixel_decoder_ = copy.deepcopy(pixel_decoder) pixel_decoder_.update( in_channels=in_channels, feat_channels=feat_channels, out_channels=out_channels) self.pixel_decoder = MODELS.build(pixel_decoder_) self.transformer_decoder = Mask2FormerTransformerDecoder( **transformer_decoder) self.decoder_embed_dims = self.transformer_decoder.embed_dims self.decoder_input_projs = ModuleList() # from low resolution to high resolution for _ in range(num_transformer_feat_level): if (self.decoder_embed_dims != feat_channels or enforce_decoder_input_project): self.decoder_input_projs.append( Conv2d( feat_channels, self.decoder_embed_dims, kernel_size=1)) else: self.decoder_input_projs.append(nn.Identity()) self.decoder_positional_encoding = SinePositionalEncoding( **positional_encoding) self.query_embed = nn.Embedding(self.num_queries, feat_channels) self.query_feat = nn.Embedding(self.num_queries, feat_channels) # from low resolution to high resolution self.level_embed = nn.Embedding(self.num_transformer_feat_level, feat_channels) self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) self.mask_embed = nn.Sequential( nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), nn.Linear(feat_channels, out_channels)) self.test_cfg = test_cfg self.train_cfg = train_cfg if train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) self.num_points = self.train_cfg.get('num_points', 12544) self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0) self.importance_sample_ratio = self.train_cfg.get( 'importance_sample_ratio', 0.75) self.class_weight = loss_cls.class_weight self.loss_cls = MODELS.build(loss_cls) self.loss_mask = MODELS.build(loss_mask) self.loss_dice = MODELS.build(loss_dice) def init_weights(self) -> None: for m in self.decoder_input_projs: if isinstance(m, Conv2d): caffe2_xavier_init(m, bias=0) self.pixel_decoder.init_weights() for p in self.transformer_decoder.parameters(): if p.dim() > 
1: nn.init.xavier_normal_(p) def _get_targets_single(self, cls_score: Tensor, mask_pred: Tensor, gt_instances: InstanceData, img_meta: dict) -> Tuple[Tensor]: """Compute classification and mask targets for one image. Args: cls_score (Tensor): Mask score logits from a single decoder layer for one image. Shape (num_queries, cls_out_channels). mask_pred (Tensor): Mask logits for a single decoder layer for one image. Shape (num_queries, h, w). gt_instances (:obj:`InstanceData`): It contains ``labels`` and ``masks``. img_meta (dict): Image informtation. Returns: tuple[Tensor]: A tuple containing the following for one image. - labels (Tensor): Labels of each image. \ shape (num_queries, ). - label_weights (Tensor): Label weights of each image. \ shape (num_queries, ). - mask_targets (Tensor): Mask targets of each image. \ shape (num_queries, h, w). - mask_weights (Tensor): Mask weights of each image. \ shape (num_queries, ). - pos_inds (Tensor): Sampled positive indices for each \ image. - neg_inds (Tensor): Sampled negative indices for each \ image. - sampling_result (:obj:`SamplingResult`): Sampling results. """ gt_labels = gt_instances.labels gt_masks = gt_instances.masks # sample points num_queries = cls_score.shape[0] num_gts = gt_labels.shape[0] point_coords = torch.rand((1, self.num_points, 2), device=cls_score.device) # shape (num_queries, num_points) mask_points_pred = point_sample( mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1, 1)).squeeze(1) # shape (num_gts, num_points) gt_points_masks = point_sample( gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1, 1)).squeeze(1) sampled_gt_instances = InstanceData( labels=gt_labels, masks=gt_points_masks) sampled_pred_instances = InstanceData( scores=cls_score, masks=mask_points_pred) # assign and sample assign_result = self.assigner.assign( pred_instances=sampled_pred_instances, gt_instances=sampled_gt_instances, img_meta=img_meta) pred_instances = InstanceData(scores=cls_score, masks=mask_pred) sampling_result = self.sampler.sample( assign_result=assign_result, pred_instances=pred_instances, gt_instances=gt_instances) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds # label target labels = gt_labels.new_full((self.num_queries, ), self.num_classes, dtype=torch.long) labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] label_weights = gt_labels.new_ones((self.num_queries, )) # mask target mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] mask_weights = mask_pred.new_zeros((self.num_queries, )) mask_weights[pos_inds] = 1.0 return (labels, label_weights, mask_targets, mask_weights, pos_inds, neg_inds, sampling_result) def _loss_by_feat_single(self, cls_scores: Tensor, mask_preds: Tensor, batch_gt_instances: List[InstanceData], batch_img_metas: List[dict]) -> Tuple[Tensor]: """Loss function for outputs from a single decoder layer. Args: cls_scores (Tensor): Mask score logits from a single decoder layer for all images. Shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should includes background. mask_preds (Tensor): Mask logits for a pixel decoder for all images. Shape (batch_size, num_queries, h, w). batch_gt_instances (list[obj:`InstanceData`]): each contains ``labels`` and ``masks``. batch_img_metas (list[dict]): List of image meta information. Returns: tuple[Tensor]: Loss components for outputs from a single \ decoder layer. 
""" num_imgs = cls_scores.size(0) cls_scores_list = [cls_scores[i] for i in range(num_imgs)] mask_preds_list = [mask_preds[i] for i in range(num_imgs)] (labels_list, label_weights_list, mask_targets_list, mask_weights_list, avg_factor) = self.get_targets(cls_scores_list, mask_preds_list, batch_gt_instances, batch_img_metas) # shape (batch_size, num_queries) labels = torch.stack(labels_list, dim=0) # shape (batch_size, num_queries) label_weights = torch.stack(label_weights_list, dim=0) # shape (num_total_gts, h, w) mask_targets = torch.cat(mask_targets_list, dim=0) # shape (batch_size, num_queries) mask_weights = torch.stack(mask_weights_list, dim=0) # classfication loss # shape (batch_size * num_queries, ) cls_scores = cls_scores.flatten(0, 1) labels = labels.flatten(0, 1) label_weights = label_weights.flatten(0, 1) class_weight = cls_scores.new_tensor(self.class_weight) loss_cls = self.loss_cls( cls_scores, labels, label_weights, avg_factor=class_weight[labels].sum()) num_total_masks = reduce_mean(cls_scores.new_tensor([avg_factor])) num_total_masks = max(num_total_masks, 1) # extract positive ones # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) mask_preds = mask_preds[mask_weights > 0] if mask_targets.shape[0] == 0: # zero match loss_dice = mask_preds.sum() loss_mask = mask_preds.sum() return loss_cls, loss_mask, loss_dice with torch.no_grad(): points_coords = get_uncertain_point_coords_with_randomness( mask_preds.unsqueeze(1), None, self.num_points, self.oversample_ratio, self.importance_sample_ratio) # shape (num_total_gts, h, w) -> (num_total_gts, num_points) mask_point_targets = point_sample( mask_targets.unsqueeze(1).float(), points_coords).squeeze(1) # shape (num_queries, h, w) -> (num_queries, num_points) mask_point_preds = point_sample( mask_preds.unsqueeze(1), points_coords).squeeze(1) # dice loss loss_dice = self.loss_dice( mask_point_preds, mask_point_targets, avg_factor=num_total_masks) # mask loss # shape (num_queries, num_points) -> (num_queries * num_points, ) mask_point_preds = mask_point_preds.reshape(-1) # shape (num_total_gts, num_points) -> (num_total_gts * num_points, ) mask_point_targets = mask_point_targets.reshape(-1) loss_mask = self.loss_mask( mask_point_preds, mask_point_targets, avg_factor=num_total_masks * self.num_points) return loss_cls, loss_mask, loss_dice def _forward_head(self, decoder_out: Tensor, mask_feature: Tensor, attn_mask_target_size: Tuple[int, int]) -> Tuple[Tensor]: """Forward for head part which is called after every decoder layer. Args: decoder_out (Tensor): in shape (batch_size, num_queries, c). mask_feature (Tensor): in shape (batch_size, c, h, w). attn_mask_target_size (tuple[int, int]): target attention mask size. Returns: tuple: A tuple contain three elements. - cls_pred (Tensor): Classification scores in shape \ (batch_size, num_queries, cls_out_channels). \ Note `cls_out_channels` should includes background. - mask_pred (Tensor): Mask scores in shape \ (batch_size, num_queries,h, w). - attn_mask (Tensor): Attention mask in shape \ (batch_size * num_heads, num_queries, h, w). 
""" decoder_out = self.transformer_decoder.post_norm(decoder_out) # shape (num_queries, batch_size, c) cls_pred = self.cls_embed(decoder_out) # shape (num_queries, batch_size, c) mask_embed = self.mask_embed(decoder_out) # shape (num_queries, batch_size, h, w) mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature) attn_mask = F.interpolate( mask_pred, attn_mask_target_size, mode='bilinear', align_corners=False) # shape (num_queries, batch_size, h, w) -> # (batch_size * num_head, num_queries, h, w) attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat( (1, self.num_heads, 1, 1)).flatten(0, 1) attn_mask = attn_mask.sigmoid() < 0.5 attn_mask = attn_mask.detach() return cls_pred, mask_pred, attn_mask def forward(self, x: List[Tensor], batch_data_samples: SampleList) -> Tuple[List[Tensor]]: """Forward function. Args: x (list[Tensor]): Multi scale Features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple[list[Tensor]]: A tuple contains two elements. - cls_pred_list (list[Tensor)]: Classification logits \ for each decoder layer. Each is a 3D-tensor with shape \ (batch_size, num_queries, cls_out_channels). \ Note `cls_out_channels` should includes background. - mask_pred_list (list[Tensor]): Mask logits for each \ decoder layer. Each with shape (batch_size, num_queries, \ h, w). """ batch_img_metas = [ data_sample.metainfo for data_sample in batch_data_samples ] batch_size = len(batch_img_metas) mask_features, multi_scale_memorys = self.pixel_decoder(x) # multi_scale_memorys (from low resolution to high resolution) decoder_inputs = [] decoder_positional_encodings = [] for i in range(self.num_transformer_feat_level): decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i]) # shape (batch_size, c, h, w) -> (batch_size, h*w, c) decoder_input = decoder_input.flatten(2).permute(0, 2, 1) level_embed = self.level_embed.weight[i].view(1, 1, -1) decoder_input = decoder_input + level_embed # shape (batch_size, c, h, w) -> (batch_size, h*w, c) mask = decoder_input.new_zeros( (batch_size, ) + multi_scale_memorys[i].shape[-2:], dtype=torch.bool) decoder_positional_encoding = self.decoder_positional_encoding( mask) decoder_positional_encoding = decoder_positional_encoding.flatten( 2).permute(0, 2, 1) decoder_inputs.append(decoder_input) decoder_positional_encodings.append(decoder_positional_encoding) # shape (num_queries, c) -> (batch_size, num_queries, c) query_feat = self.query_feat.weight.unsqueeze(0).repeat( (batch_size, 1, 1)) query_embed = self.query_embed.weight.unsqueeze(0).repeat( (batch_size, 1, 1)) cls_pred_list = [] mask_pred_list = [] cls_pred, mask_pred, attn_mask = self._forward_head( query_feat, mask_features, multi_scale_memorys[0].shape[-2:]) cls_pred_list.append(cls_pred) mask_pred_list.append(mask_pred) for i in range(self.num_transformer_decoder_layers): level_idx = i % self.num_transformer_feat_level # if a mask is all True(all background), then set it all False. 
attn_mask[torch.where( attn_mask.sum(-1) == attn_mask.shape[-1])] = False # cross_attn + self_attn layer = self.transformer_decoder.layers[i] query_feat = layer( query=query_feat, key=decoder_inputs[level_idx], value=decoder_inputs[level_idx], query_pos=query_embed, key_pos=decoder_positional_encodings[level_idx], cross_attn_mask=attn_mask, query_key_padding_mask=None, # here we do not apply masking on padded region key_padding_mask=None) cls_pred, mask_pred, attn_mask = self._forward_head( query_feat, mask_features, multi_scale_memorys[ (i + 1) % self.num_transformer_feat_level].shape[-2:]) cls_pred_list.append(cls_pred) mask_pred_list.append(mask_pred) return cls_pred_list, mask_pred_list
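The _forward_head step of Mask2FormerHead above derives the masked-attention mask by resizing the predicted mask logits to the next feature level, thresholding the sigmoid at 0.5 and repeating the result per attention head. A minimal sketch of just that step in plain PyTorch (batch size, query count and head count are assumed values):

import torch
import torch.nn.functional as F

batch_size, num_queries, num_heads = 2, 100, 8
mask_pred = torch.randn(batch_size, num_queries, 64, 64)   # mask logits
attn_mask_target_size = (32, 32)

attn_mask = F.interpolate(
    mask_pred, attn_mask_target_size, mode='bilinear', align_corners=False)
# (b, q, h, w) -> (b * num_heads, q, h * w); True marks a masked position
attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat(
    (1, num_heads, 1, 1)).flatten(0, 1)
attn_mask = (attn_mask.sigmoid() < 0.5).detach()
print(attn_mask.shape)   # torch.Size([16, 100, 1024])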
21,211
44.715517
79
py
ERD
ERD-main/mmdet/models/dense_heads/dense_test_mixins.py
# Copyright (c) OpenMMLab. All rights reserved. import sys import warnings from inspect import signature import torch from mmcv.ops import batched_nms from mmengine.structures import InstanceData from mmdet.structures.bbox import bbox_mapping_back from ..test_time_augs import merge_aug_proposals if sys.version_info >= (3, 7): from mmdet.utils.contextmanagers import completed class BBoxTestMixin(object): """Mixin class for testing det bboxes via DenseHead.""" def simple_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes without test-time augmentation, can be applied in DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. \ Each item usually contains following keys. \ - scores (Tensor): Classification scores, has a shape (num_instance,) - labels (Tensor): Labels of bboxes, has a shape (num_instances,). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ warnings.warn('You are calling `simple_test_bboxes` in ' '`dense_test_mixins`, but the `dense_test_mixins`' 'will be deprecated soon. Please use ' '`simple_test` instead.') outs = self.forward(feats) results_list = self.get_results( *outs, img_metas=img_metas, rescale=rescale) return results_list def aug_test_bboxes(self, feats, img_metas, rescale=False): """Test det bboxes with test time augmentation, can be applied in DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is ``bboxes`` with shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). The shape of the second tensor in the tuple is ``labels`` with shape (n,). The length of list should always be 1. """ warnings.warn('You are calling `aug_test_bboxes` in ' '`dense_test_mixins`, but the `dense_test_mixins`' 'will be deprecated soon. 
Please use ' '`aug_test` instead.') # check with_nms argument gb_sig = signature(self.get_results) gb_args = [p.name for p in gb_sig.parameters.values()] gbs_sig = signature(self._get_results_single) gbs_args = [p.name for p in gbs_sig.parameters.values()] assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ f'{self.__class__.__name__}' \ ' does not support test-time augmentation' aug_bboxes = [] aug_scores = [] aug_labels = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch outs = self.forward(x) bbox_outputs = self.get_results( *outs, img_metas=img_meta, cfg=self.test_cfg, rescale=False, with_nms=False)[0] aug_bboxes.append(bbox_outputs.bboxes) aug_scores.append(bbox_outputs.scores) if len(bbox_outputs) >= 3: aug_labels.append(bbox_outputs.labels) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = self.merge_aug_bboxes( aug_bboxes, aug_scores, img_metas) merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None if merged_bboxes.numel() == 0: det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1) return [ (det_bboxes, merged_labels), ] det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, merged_labels, self.test_cfg.nms) det_bboxes = det_bboxes[:self.test_cfg.max_per_img] det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img] if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( img_metas[0][0]['scale_factor']) results = InstanceData() results.bboxes = _det_bboxes[:, :4] results.scores = _det_bboxes[:, 4] results.labels = det_labels return [results] def aug_test_rpn(self, feats, img_metas): """Test with augmentation for only for ``RPNHead`` and its variants, e.g., ``GARPNHead``, etc. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Proposals of each image, each item has shape (n, 5), where 5 represent (tl_x, tl_y, br_x, br_y, score). """ samples_per_gpu = len(img_metas[0]) aug_proposals = [[] for _ in range(samples_per_gpu)] for x, img_meta in zip(feats, img_metas): results_list = self.simple_test_rpn(x, img_meta) for i, results in enumerate(results_list): proposals = torch.cat( [results.bboxes, results.scores[:, None]], dim=-1) aug_proposals[i].append(proposals) # reorganize the order of 'img_metas' to match the dimensions # of 'aug_proposals' aug_img_metas = [] for i in range(samples_per_gpu): aug_img_meta = [] for j in range(len(img_metas)): aug_img_meta.append(img_metas[j][i]) aug_img_metas.append(aug_img_meta) # after merging, proposals will be rescaled to the original image size merged_proposals = [] for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas): merged_proposal = merge_aug_proposals(proposals, aug_img_meta, self.test_cfg) results = InstanceData() results.bboxes = merged_proposal[:, :4] results.scores = merged_proposal[:, 4] merged_proposals.append(results) return merged_proposals if sys.version_info >= (3, 7): async def async_simple_test_rpn(self, x, img_metas): sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025) async with completed( __name__, 'rpn_head_forward', sleep_interval=sleep_interval): rpn_outs = self(x) proposal_list = self.get_results(*rpn_outs, img_metas=img_metas) return proposal_list def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas): """Merge augmented detection bboxes and scores. 
Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_shapes (list[Tensor]): shape (3, ). Returns: tuple[Tensor]: ``bboxes`` with shape (n,4), where 4 represent (tl_x, tl_y, br_x, br_y) and ``scores`` with shape (n,). """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] flip_direction = img_info[0]['flip_direction'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.cat(recovered_bboxes, dim=0) if aug_scores is None: return bboxes else: scores = torch.cat(aug_scores, dim=0) return bboxes, scores
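merge_aug_bboxes above relies on bbox_mapping_back to undo the test-time augmentation before concatenating predictions. The sketch below only illustrates the horizontal-flip part of that mapping with an assumed image width; the real helper also handles scale factors and vertical/diagonal flip directions:

import torch

def flip_back_horizontal(bboxes, img_width):
    # mirror (x1, x2) around the image width; y coordinates are unchanged
    flipped = bboxes.clone()
    flipped[:, 0] = img_width - bboxes[:, 2]
    flipped[:, 2] = img_width - bboxes[:, 0]
    return flipped

aug_bboxes = torch.tensor([[10., 20., 50., 60.]])   # predicted on the flipped image
print(flip_back_horizontal(aug_bboxes, img_width=200))
# tensor([[150.,  20., 190.,  60.]])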
9,048
40.893519
79
py
ERD
ERD-main/mmdet/models/dense_heads/gfl_head_increment_erd.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.ops import batched_nms from torch import Tensor from mmdet.registry import MODELS, TASK_UTILS from mmdet.structures import SampleList from mmdet.structures.bbox import distance2bbox, bbox_overlaps from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList, reduce_mean) from .gfl_head import GFLHead from ..task_modules.samplers import PseudoSampler from ..utils import (multi_apply, unpack_gt_instances) class Integral(nn.Module): """A fixed layer for calculating integral result from distribution. This layer calculates the target location by :math: ``sum{P(y_i) * y_i}``, P(y_i) denotes the softmax vector that represents the discrete distribution y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max} Args: reg_max (int): The maximal value of the discrete set. Defaults to 16. You may want to reset it according to your new dataset or related settings. """ def __init__(self, reg_max: int = 16) -> None: super().__init__() self.reg_max = reg_max self.register_buffer('project', torch.linspace(0, self.reg_max, self.reg_max + 1)) def forward(self, x: Tensor) -> Tensor: """Forward feature from the regression head to get integral result of bounding box location. Args: x (Tensor): Features of the regression head, shape (N, 4*(n+1)), n is self.reg_max. Returns: x (Tensor): Integral result of box locations, i.e., distance offsets from the box center in four directions, shape (N, 4). """ x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) x = F.linear(x, self.project.type_as(x)).reshape(-1, 4) return x @MODELS.register_module() class GFLHeadIncrementERD(GFLHead): """Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection. GFL head structure is similar with ATSS, however GFL uses 1) joint representation for classification and localization quality, and 2) flexible General distribution for bounding box locations, which are supervised by Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively https://arxiv.org/abs/2006.04388 Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Defaults to 4. conv_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct and config conv layer. Defaults to None. norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and config norm layer. Default: dict(type='GN', num_groups=32, requires_grad=True). loss_qfl (:obj:`ConfigDict` or dict): Config of Quality Focal Loss (QFL). bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. Defaults to 'DistancePointBBoxCoder'. reg_max (int): Max value of integral set :math: ``{0, ..., reg_max}`` in QFL setting. Defaults to 16. init_cfg (:obj:`ConfigDict` or dict or list[dict] or list[:obj:`ConfigDict`]): Initialization config dict. 
Example: >>> self = GFLHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_quality_score, bbox_pred = self.forward(feats) >>> assert len(cls_quality_score) == len(self.scales) """ def __init__(self, num_classes: int, in_channels: int, stacked_convs: int = 4, conv_cfg: OptConfigType = None, norm_cfg: ConfigType = dict( type='GN', num_groups=32, requires_grad=True), loss_dfl: ConfigType = dict( type='DistributionFocalLoss', loss_weight=0.25), loss_ld: ConfigType = dict( type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10), bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), reg_max: int = 16, init_cfg: MultiConfig = dict( type='Normal', layer='Conv2d', std=0.01, override=dict( type='Normal', name='gfl_cls', std=0.01, bias_prob=0.01)), **kwargs) -> None: self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_max = reg_max super().__init__( num_classes=num_classes, in_channels=in_channels, bbox_coder=bbox_coder, init_cfg=init_cfg, **kwargs) if self.train_cfg: self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) if self.train_cfg.get('sampler', None) is not None: self.sampler = TASK_UTILS.build( self.train_cfg['sampler'], default_args=dict(context=self)) else: self.sampler = PseudoSampler(context=self) self.integral = Integral(self.reg_max) self.loss_dfl = MODELS.build(loss_dfl) self.loss_ld = MODELS.build(loss_ld) def distill_loss_by_image_single(self, anchors, new_cls_scores, new_bbox_preds, ori_cls_inds, ori_box_inds, ori_cls_scores, ori_bbox_preds, dist_loss_weight, ori_num_classes: int, avg_factor: int) -> dict: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). stride (Tuple[int]): Stride in this scale level. avg_factor (int): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" # ===========> distillation classification (only u+2 * sigma) using l2 loss new_topk_cls_scores = new_cls_scores.gather(0, ori_cls_inds.unsqueeze(-1).expand(-1, new_cls_scores.size(-1))) ori_topk_cls_scores = ori_cls_scores.gather(0, ori_cls_inds.unsqueeze(-1).expand(-1, ori_cls_scores.size(-1))) loss_dist_cls = dist_loss_weight * self.l2_loss(new_topk_cls_scores, ori_topk_cls_scores) # ===========> distillation regression (only u+2 * sigma) using ld loss anchor_centers = self.anchor_center(anchors) # ori decode bbox, shape (Num,4) ori_bbox_preds_tblr = self.integral(ori_bbox_preds) decode_bbox_pred = distance2bbox(anchor_centers, ori_bbox_preds_tblr) ori_cls_conf = ori_cls_scores.sigmoid() cls_conf, ids = ori_cls_conf.max(dim=-1) # nms nms_cfg = dict(iou_threshold=0.005) # 0.005 thr_bboxes, thr_scores, thr_id = decode_bbox_pred[ori_box_inds], cls_conf[ori_box_inds], \ ids[ori_box_inds] _, keep = batched_nms(thr_bboxes, thr_scores, thr_id, nms_cfg) nms_bbox_preds = new_bbox_preds.gather( 0, ori_box_inds.unsqueeze(-1).expand(-1, new_bbox_preds.size(-1))) new_topk_bbox_preds = nms_bbox_preds.gather( 0, keep.unsqueeze(-1).expand(-1, nms_bbox_preds.size(-1))) nms_ori_topk_bbox_preds = ori_bbox_preds.gather( 0, ori_box_inds.unsqueeze(-1).expand(-1, ori_bbox_preds.size(-1))) ori_topk_bbox_preds = nms_ori_topk_bbox_preds.gather( 0, keep.unsqueeze(-1).expand(-1, nms_ori_topk_bbox_preds.size(-1))) new_topk_bbox_corners = new_topk_bbox_preds.reshape(-1, self.reg_max + 1) ori_topk_pred_corners = ori_topk_bbox_preds.reshape(-1, self.reg_max + 1) weight_targets = new_cls_scores.reshape(-1, ori_num_classes)[ori_box_inds].detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][keep.reshape(-1)] loss_dist_bbox = dist_loss_weight * self.loss_ld(new_topk_bbox_corners, ori_topk_pred_corners, weight=weight_targets[:, None].expand(-1, 4).reshape( -1), avg_factor=4.0) return loss_dist_cls, loss_dist_bbox def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor, bbox_pred: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, stride: Tuple[int], ori_num_classes: int, avg_factor: int) -> dict: """Calculate the loss of a single scale level based on the features extracted by the detection head. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor weight shape (N, num_total_anchors, 4). stride (Tuple[int]): Stride in this scale level. avg_factor (int): Average factor that is used to average the loss. When using sampling method, avg_factor is usually the sum of positive and negative priors. When using `PseudoSampler`, `avg_factor` is usually equal to the number of positive priors. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
anchors = anchors.reshape(-1, 4) # cls_score = cls_score.permute(0, 2, 3, # 1).reshape(-1, self.cls_out_channels) cls_score = cls_score[:, ori_num_classes:].permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels - ori_num_classes) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1)) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes - ori_num_classes # only optimize the novel classes labels[labels == self.num_classes] = bg_class_ind # only optimize the novel classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1) score = label_weights.new_zeros(labels.shape) if len(pos_inds) > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] weight_targets = cls_score.detach().sigmoid() weight_targets = weight_targets.max(dim=1)[0][pos_inds] pos_bbox_pred_corners = self.integral(pos_bbox_pred) pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchor_centers, pos_bbox_pred_corners) pos_decode_bbox_targets = pos_bbox_targets / stride[0] score[pos_inds] = bbox_overlaps( pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True) pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) target_corners = self.bbox_coder.encode(pos_anchor_centers, pos_decode_bbox_targets, self.reg_max).reshape(-1) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_decode_bbox_targets, weight=weight_targets, avg_factor=1.0) # dfl loss loss_dfl = self.loss_dfl( pred_corners, target_corners, weight=weight_targets[:, None].expand(-1, 4).reshape(-1), avg_factor=4.0) else: loss_bbox = bbox_pred.sum() * 0 loss_dfl = bbox_pred.sum() * 0 weight_targets = bbox_pred.new_tensor(0) # cls (qfl) loss loss_cls = self.loss_cls( cls_score, (labels, score), weight=label_weights, avg_factor=avg_factor) return loss_cls, loss_bbox, loss_dfl, weight_targets.sum() @staticmethod def l2_loss(pred, target, reduction='mean'): r"""Function that takes the mean element-wise square value difference. """ assert target.size() == pred.size() loss = (pred - target).pow(2).float() if reduction != 'none': loss = torch.mean(loss) if reduction == 'mean' else torch.sum(loss) return loss def loss_by_feat(self, ori_outs: Tuple[Tensor], new_outs: Tuple[Tensor], ori_topk_cls_inds, # for distillation ori_topk_cls_scores, # for distillation ori_topk_bbox_inds, # for distillation ori_topk_bbox_preds, # for distillation ori_num_classes, dist_loss_weight, model, batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Cls and quality scores for each scale level has shape (N, num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. 
Returns: dict[str, Tensor]: A dictionary of loss components. """ # ****************************** ori loss ********************************** cls_scores, bbox_preds = new_outs num_imgs = cls_scores[0].size(0) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() losses_cls, losses_bbox, losses_dfl, \ avg_factor = multi_apply( self.loss_by_feat_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, self.prior_generator.strides, ori_num_classes=ori_num_classes, avg_factor=avg_factor) avg_factor = sum(avg_factor) avg_factor = reduce_mean(avg_factor).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) # ****************************** distill loss ********************************** anchor_list = torch.cat(anchor_list, dim=1) bbox_preds_list = [ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4 * (self.reg_max + 1)) for bbox_pred in bbox_preds] bbox_preds_list = torch.cat(bbox_preds_list, dim=1) ori_cls_scores, ori_bbox_preds = ori_outs ori_cls_scores_list = [ ori_cls_score[:, :ori_num_classes, :, :].permute(0, 2, 3, 1).reshape( num_imgs, -1, ori_num_classes) for ori_cls_score in ori_cls_scores] ori_cls_scores_list = torch.cat(ori_cls_scores_list, dim=1) ori_bbox_preds_list = [ ori_bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4 * (self.reg_max + 1)) for ori_bbox_pred in ori_bbox_preds] ori_bbox_preds_list = torch.cat(ori_bbox_preds_list, dim=1) new_cls_scores_list = [ cls_score[:, :ori_num_classes, :, :].permute(0, 2, 3, 1).reshape( num_imgs, -1, ori_num_classes) for cls_score in cls_scores] new_cls_scores_list = torch.cat(new_cls_scores_list, dim=1) loss_dist_cls, loss_dist_bbox = multi_apply( self.distill_loss_by_image_single, anchor_list, new_cls_scores_list, bbox_preds_list, ori_topk_cls_inds, ori_topk_bbox_inds, ori_cls_scores_list, ori_bbox_preds_list, dist_loss_weight=dist_loss_weight, ori_num_classes=ori_num_classes, avg_factor=avg_factor) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl, loss_dist_cls=loss_dist_cls, loss_dist_bbox=loss_dist_bbox) # def loss(self, ori_out: Tuple[Tensor], new_out: Tuple[Tensor],batch_data_samples: SampleList) -> dict: def loss(self, ori_outs: Tuple[Tensor], new_outs: Tuple[Tensor], batch_data_samples: SampleList, topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds, ori_num_classes, dist_loss_weight, model) -> dict: """Perform forward propagation and loss calculation of the detection head on the features of the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: dict: A dictionary of loss components. 
""" # outs = self(x) outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs loss_inputs = (ori_outs, new_outs, topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds, ori_num_classes, dist_loss_weight, model) + ( batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) return losses
21772
43.892784
115
py
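GFLHeadIncrementERD above distils the old-class responses of a frozen teacher head into the incremental student: an L2 loss on the teacher's most confident classification outputs and a temperature-scaled KL loss on the box distribution logits. The following is a minimal sketch of those two terms using random placeholder tensors; the shapes and the simple top-k selection stand in for the repo's u + 2*sigma confidence selection and are not the exact training code:

import torch
import torch.nn.functional as F

num_anchors, num_old_classes, reg_max, topk = 100, 40, 16, 10

new_cls = torch.randn(num_anchors, num_old_classes)    # student logits for old classes
old_cls = torch.randn(num_anchors, num_old_classes)    # frozen teacher logits
new_reg = torch.randn(num_anchors, 4 * (reg_max + 1))  # student distribution logits
old_reg = torch.randn(num_anchors, 4 * (reg_max + 1))  # teacher distribution logits

# pick the anchors the teacher is most confident about (stand-in for the
# u + 2*sigma selection performed by the detector)
topk_inds = old_cls.sigmoid().max(dim=1).values.topk(topk).indices

# classification distillation: L2 between student and teacher logits
loss_cls_dist = F.mse_loss(new_cls[topk_inds], old_cls[topk_inds])

# localization distillation: KL divergence between per-side box distributions
T = 10.0
new_corners = new_reg[topk_inds].reshape(-1, reg_max + 1)
old_corners = old_reg[topk_inds].reshape(-1, reg_max + 1)
loss_reg_dist = F.kl_div(
    F.log_softmax(new_corners / T, dim=1),
    F.softmax(old_corners / T, dim=1),
    reduction='batchmean') * (T * T)

print(loss_cls_dist.item(), loss_reg_dist.item())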
ERD
ERD-main/mmdet/models/dense_heads/condinst_head.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale from mmengine.config import ConfigDict from mmengine.model import BaseModule, kaiming_init from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import MODELS from mmdet.structures.bbox import cat_boxes from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, OptInstanceList, reduce_mean) from ..task_modules.prior_generators import MlvlPointGenerator from ..utils import (aligned_bilinear, filter_scores_and_topk, multi_apply, relative_coordinate_maps, select_single_mlvl) from ..utils.misc import empty_instances from .base_mask_head import BaseMaskHead from .fcos_head import FCOSHead INF = 1e8 @MODELS.register_module() class CondInstBboxHead(FCOSHead): """CondInst box head used in https://arxiv.org/abs/1904.02689. Note that CondInst Bbox Head is a extension of FCOS head. Two differences are described as follows: 1. CondInst box head predicts a set of params for each instance. 2. CondInst box head return the pos_gt_inds and pos_inds. Args: num_params (int): Number of params for instance segmentation. """ def __init__(self, *args, num_params: int = 169, **kwargs) -> None: self.num_params = num_params super().__init__(*args, **kwargs) def _init_layers(self) -> None: """Initialize layers of the head.""" super()._init_layers() self.controller = nn.Conv2d( self.feat_channels, self.num_params, 3, padding=1) def forward_single(self, x: Tensor, scale: Scale, stride: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps, only used to normalize the bbox prediction when self.norm_on_bbox is True. Returns: tuple: scores for each class, bbox predictions, centerness predictions and param predictions of input feature maps. """ cls_score, bbox_pred, cls_feat, reg_feat = \ super(FCOSHead, self).forward_single(x) if self.centerness_on_reg: centerness = self.conv_centerness(reg_feat) else: centerness = self.conv_centerness(cls_feat) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() if self.norm_on_bbox: # bbox_pred needed for gradient computation has been modified # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace # F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) if not self.training: bbox_pred *= stride else: bbox_pred = bbox_pred.exp() param_pred = self.controller(reg_feat) return cls_score, bbox_pred, centerness, param_pred def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], centernesses: List[Tensor], param_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_points * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is num_points * 4. 
centernesses (list[Tensor]): centerness for each scale level, each is a 4D-tensor, the channel number is num_points * 1. param_preds (List[Tensor]): param_pred for each scale level, each is a 4D-tensor, the channel number is num_params. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert len(cls_scores) == len(bbox_preds) == len(centernesses) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] # Need stride for rel coord compute all_level_points_strides = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device, with_stride=True) all_level_points = [i[:, :2] for i in all_level_points_strides] all_level_strides = [i[:, 2] for i in all_level_points_strides] labels, bbox_targets, pos_inds_list, pos_gt_inds_list = \ self.get_targets(all_level_points, batch_gt_instances) num_imgs = cls_scores[0].size(0) # flatten cls_scores, bbox_preds and centerness flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_centerness = [ centerness.permute(0, 2, 3, 1).reshape(-1) for centerness in centernesses ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_centerness = torch.cat(flatten_centerness) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = self.num_classes pos_inds = ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)).nonzero().reshape(-1) num_pos = torch.tensor( len(pos_inds), dtype=torch.float, device=bbox_preds[0].device) num_pos = max(reduce_mean(num_pos), 1.0) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos) pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_centerness = flatten_centerness[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_centerness_targets = self.centerness_target(pos_bbox_targets) # centerness weighted iou loss centerness_denorm = max( reduce_mean(pos_centerness_targets.sum().detach()), 1e-6) if len(pos_inds) > 0: pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = self.bbox_coder.decode( pos_points, pos_bbox_preds) pos_decoded_target_preds = self.bbox_coder.decode( pos_points, pos_bbox_targets) loss_bbox = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds, weight=pos_centerness_targets, avg_factor=centerness_denorm) loss_centerness = self.loss_centerness( pos_centerness, pos_centerness_targets, avg_factor=num_pos) else: loss_bbox = pos_bbox_preds.sum() loss_centerness = pos_centerness.sum() self._raw_positive_infos.update(cls_scores=cls_scores) self._raw_positive_infos.update(centernesses=centernesses) self._raw_positive_infos.update(param_preds=param_preds) self._raw_positive_infos.update(all_level_points=all_level_points) 
self._raw_positive_infos.update(all_level_strides=all_level_strides) self._raw_positive_infos.update(pos_gt_inds_list=pos_gt_inds_list) self._raw_positive_infos.update(pos_inds_list=pos_inds_list) return dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_centerness=loss_centerness) def get_targets( self, points: List[Tensor], batch_gt_instances: InstanceList ) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]: """Compute regression, classification and centerness targets for points in multiple images. Args: points (list[Tensor]): Points of each fpn level, each has shape (num_points, 2). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. Returns: tuple: Targets of each level. - concat_lvl_labels (list[Tensor]): Labels of each level. - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ level. - pos_inds_list (list[Tensor]): pos_inds of each image. - pos_gt_inds_list (List[Tensor]): pos_gt_inds of each image. """ assert len(points) == len(self.regress_ranges) num_levels = len(points) # expand regress ranges to align with points expanded_regress_ranges = [ points[i].new_tensor(self.regress_ranges[i])[None].expand_as( points[i]) for i in range(num_levels) ] # concat all levels points and regress ranges concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl num_points = [center.size(0) for center in points] # get labels and bbox_targets of each image labels_list, bbox_targets_list, pos_inds_list, pos_gt_inds_list = \ multi_apply( self._get_targets_single, batch_gt_instances, points=concat_points, regress_ranges=concat_regress_ranges, num_points_per_lvl=num_points) # split to per img, per level labels_list = [labels.split(num_points, 0) for labels in labels_list] bbox_targets_list = [ bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list ] # concat per level image concat_lvl_labels = [] concat_lvl_bbox_targets = [] for i in range(num_levels): concat_lvl_labels.append( torch.cat([labels[i] for labels in labels_list])) bbox_targets = torch.cat( [bbox_targets[i] for bbox_targets in bbox_targets_list]) if self.norm_on_bbox: bbox_targets = bbox_targets / self.strides[i] concat_lvl_bbox_targets.append(bbox_targets) return (concat_lvl_labels, concat_lvl_bbox_targets, pos_inds_list, pos_gt_inds_list) def _get_targets_single( self, gt_instances: InstanceData, points: Tensor, regress_ranges: Tensor, num_points_per_lvl: List[int] ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Compute regression and classification targets for a single image.""" num_points = points.size(0) num_gts = len(gt_instances) gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels gt_masks = gt_instances.get('masks', None) if num_gts == 0: return gt_labels.new_full((num_points,), self.num_classes), \ gt_bboxes.new_zeros((num_points, 4)), \ gt_bboxes.new_zeros((0,), dtype=torch.int64), \ gt_bboxes.new_zeros((0,), dtype=torch.int64) areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( gt_bboxes[:, 3] - gt_bboxes[:, 1]) # TODO: figure out why these two are different # areas = areas[None].expand(num_points, num_gts) areas = areas[None].repeat(num_points, 1) regress_ranges = regress_ranges[:, None, :].expand( num_points, num_gts, 2) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None].expand(num_points, num_gts) ys = ys[:, None].expand(num_points, num_gts) left = xs - 
gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) if self.center_sampling: # condition1: inside a `center bbox` radius = self.center_sample_radius # if gt_mask not None, use gt mask's centroid to determine # the center region rather than gt_bbox center if gt_masks is None: center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2 else: h, w = gt_masks.height, gt_masks.width masks = gt_masks.to_tensor( dtype=torch.bool, device=gt_bboxes.device) yys = torch.arange( 0, h, dtype=torch.float32, device=masks.device) xxs = torch.arange( 0, w, dtype=torch.float32, device=masks.device) # m00/m10/m01 represent the moments of a contour # centroid is computed by m00/m10 and m00/m01 m00 = masks.sum(dim=-1).sum(dim=-1).clamp(min=1e-6) m10 = (masks * xxs).sum(dim=-1).sum(dim=-1) m01 = (masks * yys[:, None]).sum(dim=-1).sum(dim=-1) center_xs = m10 / m00 center_ys = m01 / m00 center_xs = center_xs[None].expand(num_points, num_gts) center_ys = center_ys[None].expand(num_points, num_gts) center_gts = torch.zeros_like(gt_bboxes) stride = center_xs.new_zeros(center_xs.shape) # project the points on current lvl back to the `original` sizes lvl_begin = 0 for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): lvl_end = lvl_begin + num_points_lvl stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius lvl_begin = lvl_end x_mins = center_xs - stride y_mins = center_ys - stride x_maxs = center_xs + stride y_maxs = center_ys + stride center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0], x_mins, gt_bboxes[..., 0]) center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1], y_mins, gt_bboxes[..., 1]) center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2], gt_bboxes[..., 2], x_maxs) center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3], gt_bboxes[..., 3], y_maxs) cb_dist_left = xs - center_gts[..., 0] cb_dist_right = center_gts[..., 2] - xs cb_dist_top = ys - center_gts[..., 1] cb_dist_bottom = center_gts[..., 3] - ys center_bbox = torch.stack( (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 else: # condition1: inside a gt bbox inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 # condition2: limit the regression range for each location max_regress_distance = bbox_targets.max(-1)[0] inside_regress_range = ( (max_regress_distance >= regress_ranges[..., 0]) & (max_regress_distance <= regress_ranges[..., 1])) # if there are still more than one objects for a location, # we choose the one with minimal area areas[inside_gt_bbox_mask == 0] = INF areas[inside_regress_range == 0] = INF min_area, min_area_inds = areas.min(dim=1) labels = gt_labels[min_area_inds] labels[min_area == INF] = self.num_classes # set as BG bbox_targets = bbox_targets[range(num_points), min_area_inds] # return pos_inds & pos_gt_inds bg_class_ind = self.num_classes pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().reshape(-1) pos_gt_inds = min_area_inds[labels < self.num_classes] return labels, bbox_targets, pos_inds, pos_gt_inds def get_positive_infos(self) -> InstanceList: """Get positive information from sampling results. Returns: list[:obj:`InstanceData`]: Positive information of each image, usually including positive bboxes, positive labels, positive priors, etc. 
""" assert len(self._raw_positive_infos) > 0 pos_gt_inds_list = self._raw_positive_infos['pos_gt_inds_list'] pos_inds_list = self._raw_positive_infos['pos_inds_list'] num_imgs = len(pos_gt_inds_list) cls_score_list = [] centerness_list = [] param_pred_list = [] point_list = [] stride_list = [] for cls_score_per_lvl, centerness_per_lvl, param_pred_per_lvl,\ point_per_lvl, stride_per_lvl in \ zip(self._raw_positive_infos['cls_scores'], self._raw_positive_infos['centernesses'], self._raw_positive_infos['param_preds'], self._raw_positive_infos['all_level_points'], self._raw_positive_infos['all_level_strides']): cls_score_per_lvl = \ cls_score_per_lvl.permute( 0, 2, 3, 1).reshape(num_imgs, -1, self.num_classes) centerness_per_lvl = \ centerness_per_lvl.permute( 0, 2, 3, 1).reshape(num_imgs, -1, 1) param_pred_per_lvl = \ param_pred_per_lvl.permute( 0, 2, 3, 1).reshape(num_imgs, -1, self.num_params) point_per_lvl = point_per_lvl.unsqueeze(0).repeat(num_imgs, 1, 1) stride_per_lvl = stride_per_lvl.unsqueeze(0).repeat(num_imgs, 1) cls_score_list.append(cls_score_per_lvl) centerness_list.append(centerness_per_lvl) param_pred_list.append(param_pred_per_lvl) point_list.append(point_per_lvl) stride_list.append(stride_per_lvl) cls_scores = torch.cat(cls_score_list, dim=1) centernesses = torch.cat(centerness_list, dim=1) param_preds = torch.cat(param_pred_list, dim=1) all_points = torch.cat(point_list, dim=1) all_strides = torch.cat(stride_list, dim=1) positive_infos = [] for i, (pos_gt_inds, pos_inds) in enumerate(zip(pos_gt_inds_list, pos_inds_list)): pos_info = InstanceData() pos_info.points = all_points[i][pos_inds] pos_info.strides = all_strides[i][pos_inds] pos_info.scores = cls_scores[i][pos_inds] pos_info.centernesses = centernesses[i][pos_inds] pos_info.param_preds = param_preds[i][pos_inds] pos_info.pos_assigned_gt_inds = pos_gt_inds pos_info.pos_inds = pos_inds positive_infos.append(pos_info) return positive_infos def predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, param_preds: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. param_preds (list[Tensor], optional): Params for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * num_params, H, W) batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. 
Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(cls_scores) == len(bbox_preds) if score_factors is None: # e.g. Retina, FreeAnchor, Foveabox, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, AutoAssign, etc. with_score_factors = True assert len(cls_scores) == len(score_factors) num_levels = len(cls_scores) featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] all_level_points_strides = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device, with_stride=True) all_level_points = [i[:, :2] for i in all_level_points_strides] all_level_strides = [i[:, 2] for i in all_level_points_strides] result_list = [] for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] cls_score_list = select_single_mlvl( cls_scores, img_id, detach=True) bbox_pred_list = select_single_mlvl( bbox_preds, img_id, detach=True) if with_score_factors: score_factor_list = select_single_mlvl( score_factors, img_id, detach=True) else: score_factor_list = [None for _ in range(num_levels)] param_pred_list = select_single_mlvl( param_preds, img_id, detach=True) results = self._predict_by_feat_single( cls_score_list=cls_score_list, bbox_pred_list=bbox_pred_list, score_factor_list=score_factor_list, param_pred_list=param_pred_list, mlvl_points=all_level_points, mlvl_strides=all_level_strides, img_meta=img_meta, cfg=cfg, rescale=rescale, with_nms=with_nms) result_list.append(results) return result_list def _predict_by_feat_single(self, cls_score_list: List[Tensor], bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], param_pred_list: List[Tensor], mlvl_points: List[Tensor], mlvl_strides: List[Tensor], img_meta: dict, cfg: ConfigDict, rescale: bool = False, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: cls_score_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). param_pred_list (List[Tensor]): Param predition from all scale levels of a single image, each item has shape (num_priors * num_params, H, W). mlvl_points (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. It has shape (num_priors, 2) mlvl_strides (List[Tensor]): Each element in the list is the stride of a single level in feature pyramid. It has shape (num_priors, 1) img_meta (dict): Image meta info. cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). 
- bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ if score_factor_list[0] is None: # e.g. Retina, FreeAnchor, etc. with_score_factors = False else: # e.g. FCOS, PAA, ATSS, etc. with_score_factors = True cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) mlvl_bbox_preds = [] mlvl_param_preds = [] mlvl_valid_points = [] mlvl_valid_strides = [] mlvl_scores = [] mlvl_labels = [] if with_score_factors: mlvl_score_factors = [] else: mlvl_score_factors = None for level_idx, (cls_score, bbox_pred, score_factor, param_pred, points, strides) in \ enumerate(zip(cls_score_list, bbox_pred_list, score_factor_list, param_pred_list, mlvl_points, mlvl_strides)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] dim = self.bbox_coder.encode_size bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim) if with_score_factors: score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class scores = cls_score.softmax(-1)[:, :-1] param_pred = param_pred.permute(1, 2, 0).reshape(-1, self.num_params) # After https://github.com/open-mmlab/mmdetection/pull/6268/, # this operation keeps fewer bboxes under the same `nms_pre`. # There is no difference in performance for most models. If you # find a slight drop in performance, you can set a larger # `nms_pre` than before. score_thr = cfg.get('score_thr', 0) results = filter_scores_and_topk( scores, score_thr, nms_pre, dict( bbox_pred=bbox_pred, param_pred=param_pred, points=points, strides=strides)) scores, labels, keep_idxs, filtered_results = results bbox_pred = filtered_results['bbox_pred'] param_pred = filtered_results['param_pred'] points = filtered_results['points'] strides = filtered_results['strides'] if with_score_factors: score_factor = score_factor[keep_idxs] mlvl_bbox_preds.append(bbox_pred) mlvl_param_preds.append(param_pred) mlvl_valid_points.append(points) mlvl_valid_strides.append(strides) mlvl_scores.append(scores) mlvl_labels.append(labels) if with_score_factors: mlvl_score_factors.append(score_factor) bbox_pred = torch.cat(mlvl_bbox_preds) priors = cat_boxes(mlvl_valid_points) bboxes = self.bbox_coder.decode(priors, bbox_pred, max_shape=img_shape) results = InstanceData() results.bboxes = bboxes results.scores = torch.cat(mlvl_scores) results.labels = torch.cat(mlvl_labels) results.param_preds = torch.cat(mlvl_param_preds) results.points = torch.cat(mlvl_valid_points) results.strides = torch.cat(mlvl_valid_strides) if with_score_factors: results.score_factors = torch.cat(mlvl_score_factors) return self._bbox_post_process( results=results, cfg=cfg, rescale=rescale, with_nms=with_nms, img_meta=img_meta) class MaskFeatModule(BaseModule): """CondInst mask feature map branch used in \ https://arxiv.org/abs/1904.02689. Args: in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels of the mask feature map branch. start_level (int): The starting feature map level from RPN that will be used to predict the mask feature map. end_level (int): The ending feature map level from rpn that will be used to predict the mask feature map. out_channels (int): Number of output channels of the mask feature map branch. 
This is the channel count of the mask feature map that to be dynamically convolved with the predicted kernel. mask_stride (int): Downsample factor of the mask feature map output. Defaults to 4. num_stacked_convs (int): Number of convs in mask feature branch. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels: int, feat_channels: int, start_level: int, end_level: int, out_channels: int, mask_stride: int = 4, num_stacked_convs: int = 4, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, init_cfg: MultiConfig = [ dict(type='Normal', layer='Conv2d', std=0.01) ], **kwargs) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.feat_channels = feat_channels self.start_level = start_level self.end_level = end_level self.mask_stride = mask_stride self.num_stacked_convs = num_stacked_convs assert start_level >= 0 and end_level >= start_level self.out_channels = out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" self.convs_all_levels = nn.ModuleList() for i in range(self.start_level, self.end_level + 1): convs_per_level = nn.Sequential() convs_per_level.add_module( f'conv{i}', ConvModule( self.in_channels, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False, bias=False)) self.convs_all_levels.append(convs_per_level) conv_branch = [] for _ in range(self.num_stacked_convs): conv_branch.append( ConvModule( self.feat_channels, self.feat_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=False)) self.conv_branch = nn.Sequential(*conv_branch) self.conv_pred = nn.Conv2d( self.feat_channels, self.out_channels, 1, stride=1) def init_weights(self) -> None: """Initialize weights of the head.""" super().init_weights() kaiming_init(self.convs_all_levels, a=1, distribution='uniform') kaiming_init(self.conv_branch, a=1, distribution='uniform') kaiming_init(self.conv_pred, a=1, distribution='uniform') def forward(self, x: Tuple[Tensor]) -> Tensor: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: Tensor: The predicted mask feature map. """ inputs = x[self.start_level:self.end_level + 1] assert len(inputs) == (self.end_level - self.start_level + 1) feature_add_all_level = self.convs_all_levels[0](inputs[0]) target_h, target_w = feature_add_all_level.size()[2:] for i in range(1, len(inputs)): input_p = inputs[i] x_p = self.convs_all_levels[i](input_p) h, w = x_p.size()[2:] factor_h = target_h // h factor_w = target_w // w assert factor_h == factor_w feature_per_level = aligned_bilinear(x_p, factor_h) feature_add_all_level = feature_add_all_level + \ feature_per_level feature_add_all_level = self.conv_branch(feature_add_all_level) feature_pred = self.conv_pred(feature_add_all_level) return feature_pred @MODELS.register_module() class CondInstMaskHead(BaseMaskHead): """CondInst mask head used in https://arxiv.org/abs/1904.02689. This head outputs the mask for CondInst. Args: mask_feature_head (dict): Config of CondInstMaskFeatHead. num_layers (int): Number of dynamic conv layers. feat_channels (int): Number of channels in the dynamic conv. mask_out_stride (int): The stride of the mask feat. 
size_of_interest (int): The size of the region used in rel coord. max_masks_to_train (int): Maximum number of masks to train for each image. loss_segm (:obj:`ConfigDict` or dict, optional): Config of segmentation loss. train_cfg (:obj:`ConfigDict` or dict, optional): Training config of head. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of head. """ def __init__(self, mask_feature_head: ConfigType, num_layers: int = 3, feat_channels: int = 8, mask_out_stride: int = 4, size_of_interest: int = 8, max_masks_to_train: int = -1, topk_masks_per_img: int = -1, loss_mask: ConfigType = None, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None) -> None: super().__init__() self.mask_feature_head = MaskFeatModule(**mask_feature_head) self.mask_feat_stride = self.mask_feature_head.mask_stride self.in_channels = self.mask_feature_head.out_channels self.num_layers = num_layers self.feat_channels = feat_channels self.size_of_interest = size_of_interest self.mask_out_stride = mask_out_stride self.max_masks_to_train = max_masks_to_train self.topk_masks_per_img = topk_masks_per_img self.prior_generator = MlvlPointGenerator([self.mask_feat_stride]) self.train_cfg = train_cfg self.test_cfg = test_cfg self.loss_mask = MODELS.build(loss_mask) self._init_layers() def _init_layers(self) -> None: """Initialize layers of the head.""" weight_nums, bias_nums = [], [] for i in range(self.num_layers): if i == 0: weight_nums.append((self.in_channels + 2) * self.feat_channels) bias_nums.append(self.feat_channels) elif i == self.num_layers - 1: weight_nums.append(self.feat_channels * 1) bias_nums.append(1) else: weight_nums.append(self.feat_channels * self.feat_channels) bias_nums.append(self.feat_channels) self.weight_nums = weight_nums self.bias_nums = bias_nums self.num_params = sum(weight_nums) + sum(bias_nums) def parse_dynamic_params( self, params: Tensor) -> Tuple[List[Tensor], List[Tensor]]: """parse the dynamic params for dynamic conv.""" num_insts = params.size(0) params_splits = list( torch.split_with_sizes( params, self.weight_nums + self.bias_nums, dim=1)) weight_splits = params_splits[:self.num_layers] bias_splits = params_splits[self.num_layers:] for i in range(self.num_layers): if i < self.num_layers - 1: weight_splits[i] = weight_splits[i].reshape( num_insts * self.in_channels, -1, 1, 1) bias_splits[i] = bias_splits[i].reshape(num_insts * self.in_channels) else: # out_channels x in_channels x 1 x 1 weight_splits[i] = weight_splits[i].reshape( num_insts * 1, -1, 1, 1) bias_splits[i] = bias_splits[i].reshape(num_insts) return weight_splits, bias_splits def dynamic_conv_forward(self, features: Tensor, weights: List[Tensor], biases: List[Tensor], num_insts: int) -> Tensor: """dynamic forward, each layer follow a relu.""" n_layers = len(weights) x = features for i, (w, b) in enumerate(zip(weights, biases)): x = F.conv2d(x, w, bias=b, stride=1, padding=0, groups=num_insts) if i < n_layers - 1: x = F.relu(x) return x def forward(self, x: tuple, positive_infos: InstanceList) -> tuple: """Forward feature from the upstream network to get prototypes and linearly combine the prototypes, using masks coefficients, into instance masks. Finally, crop the instance masks with given bboxes. Args: x (Tuple[Tensor]): Feature from the upstream network, which is a 4D-tensor. positive_infos (List[:obj:``InstanceData``]): Positive information that calculate from detect head. 
Returns: tuple: Predicted instance segmentation masks """ mask_feats = self.mask_feature_head(x) return multi_apply(self.forward_single, mask_feats, positive_infos) def forward_single(self, mask_feat: Tensor, positive_info: InstanceData) -> Tensor: """Forward features of a each image.""" pos_param_preds = positive_info.get('param_preds') pos_points = positive_info.get('points') pos_strides = positive_info.get('strides') num_inst = pos_param_preds.shape[0] mask_feat = mask_feat[None].repeat(num_inst, 1, 1, 1) _, _, H, W = mask_feat.size() if num_inst == 0: return (pos_param_preds.new_zeros((0, 1, H, W)), ) locations = self.prior_generator.single_level_grid_priors( mask_feat.size()[2:], 0, device=mask_feat.device) rel_coords = relative_coordinate_maps(locations, pos_points, pos_strides, self.size_of_interest, mask_feat.size()[2:]) mask_head_inputs = torch.cat([rel_coords, mask_feat], dim=1) mask_head_inputs = mask_head_inputs.reshape(1, -1, H, W) weights, biases = self.parse_dynamic_params(pos_param_preds) mask_preds = self.dynamic_conv_forward(mask_head_inputs, weights, biases, num_inst) mask_preds = mask_preds.reshape(-1, H, W) mask_preds = aligned_bilinear( mask_preds.unsqueeze(0), int(self.mask_feat_stride / self.mask_out_stride)).squeeze(0) return (mask_preds, ) def loss_by_feat(self, mask_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], positive_infos: InstanceList, **kwargs) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mask_preds (list[Tensor]): List of predicted masks, each has shape (num_classes, H, W). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``masks``, and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of multiple images. positive_infos (List[:obj:``InstanceData``]): Information of positive samples of each image that are assigned in detection head. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert positive_infos is not None, \ 'positive_infos should not be None in `CondInstMaskHead`' losses = dict() loss_mask = 0. num_imgs = len(mask_preds) total_pos = 0 for idx in range(num_imgs): (mask_pred, pos_mask_targets, num_pos) = \ self._get_targets_single( mask_preds[idx], batch_gt_instances[idx], positive_infos[idx]) # mask loss total_pos += num_pos if num_pos == 0 or pos_mask_targets is None: loss = mask_pred.new_zeros(1).mean() else: loss = self.loss_mask( mask_pred, pos_mask_targets, reduction_override='none').sum() loss_mask += loss if total_pos == 0: total_pos += 1 # avoid nan loss_mask = loss_mask / total_pos losses.update(loss_mask=loss_mask) return losses def _get_targets_single(self, mask_preds: Tensor, gt_instances: InstanceData, positive_info: InstanceData): """Compute targets for predictions of single image. Args: mask_preds (Tensor): Predicted prototypes with shape (num_classes, H, W). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It should includes ``bboxes``, ``labels``, and ``masks`` attributes. positive_info (:obj:`InstanceData`): Information of positive samples that are assigned in detection head. It usually contains following keys. - pos_assigned_gt_inds (Tensor): Assigner GT indexes of positive proposals, has shape (num_pos, ) - pos_inds (Tensor): Positive index of image, has shape (num_pos, ). - param_pred (Tensor): Positive param preditions with shape (num_pos, num_params). Returns: tuple: Usually returns a tuple containing learning targets. 
- mask_preds (Tensor): Positive predicted mask with shape (num_pos, mask_h, mask_w). - pos_mask_targets (Tensor): Positive mask targets with shape (num_pos, mask_h, mask_w). - num_pos (int): Positive numbers. """ gt_bboxes = gt_instances.bboxes device = gt_bboxes.device gt_masks = gt_instances.masks.to_tensor( dtype=torch.bool, device=device).float() # process with mask targets pos_assigned_gt_inds = positive_info.get('pos_assigned_gt_inds') scores = positive_info.get('scores') centernesses = positive_info.get('centernesses') num_pos = pos_assigned_gt_inds.size(0) if gt_masks.size(0) == 0 or num_pos == 0: return mask_preds, None, 0 # Since we're producing (near) full image masks, # it'd take too much vram to backprop on every single mask. # Thus we select only a subset. if (self.max_masks_to_train != -1) and \ (num_pos > self.max_masks_to_train): perm = torch.randperm(num_pos) select = perm[:self.max_masks_to_train] mask_preds = mask_preds[select] pos_assigned_gt_inds = pos_assigned_gt_inds[select] num_pos = self.max_masks_to_train elif self.topk_masks_per_img != -1: unique_gt_inds = pos_assigned_gt_inds.unique() num_inst_per_gt = max( int(self.topk_masks_per_img / len(unique_gt_inds)), 1) keep_mask_preds = [] keep_pos_assigned_gt_inds = [] for gt_ind in unique_gt_inds: per_inst_pos_inds = (pos_assigned_gt_inds == gt_ind) mask_preds_per_inst = mask_preds[per_inst_pos_inds] gt_inds_per_inst = pos_assigned_gt_inds[per_inst_pos_inds] if sum(per_inst_pos_inds) > num_inst_per_gt: per_inst_scores = scores[per_inst_pos_inds].sigmoid().max( dim=1)[0] per_inst_centerness = centernesses[ per_inst_pos_inds].sigmoid().reshape(-1, ) select = (per_inst_scores * per_inst_centerness).topk( k=num_inst_per_gt, dim=0)[1] mask_preds_per_inst = mask_preds_per_inst[select] gt_inds_per_inst = gt_inds_per_inst[select] keep_mask_preds.append(mask_preds_per_inst) keep_pos_assigned_gt_inds.append(gt_inds_per_inst) mask_preds = torch.cat(keep_mask_preds) pos_assigned_gt_inds = torch.cat(keep_pos_assigned_gt_inds) num_pos = pos_assigned_gt_inds.size(0) # Follow the origin implement start = int(self.mask_out_stride // 2) gt_masks = gt_masks[:, start::self.mask_out_stride, start::self.mask_out_stride] gt_masks = gt_masks.gt(0.5).float() pos_mask_targets = gt_masks[pos_assigned_gt_inds] return (mask_preds, pos_mask_targets, num_pos) def predict_by_feat(self, mask_preds: List[Tensor], results_list: InstanceList, batch_img_metas: List[dict], rescale: bool = True, **kwargs) -> InstanceList: """Transform a batch of output features extracted from the head into mask results. Args: mask_preds (list[Tensor]): Predicted prototypes with shape (num_classes, H, W). results_list (List[:obj:``InstanceData``]): BBoxHead results. batch_img_metas (list[dict]): Meta information of all images. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). 
""" assert len(mask_preds) == len(results_list) == len(batch_img_metas) for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] results = results_list[img_id] bboxes = results.bboxes mask_pred = mask_preds[img_id] if bboxes.shape[0] == 0 or mask_pred.shape[0] == 0: results_list[img_id] = empty_instances( [img_meta], bboxes.device, task_type='mask', instance_results=[results])[0] else: im_mask = self._predict_by_feat_single( mask_preds=mask_pred, bboxes=bboxes, img_meta=img_meta, rescale=rescale) results.masks = im_mask return results_list def _predict_by_feat_single(self, mask_preds: Tensor, bboxes: Tensor, img_meta: dict, rescale: bool, cfg: OptConfigType = None): """Transform a single image's features extracted from the head into mask results. Args: mask_preds (Tensor): Predicted prototypes, has shape [H, W, N]. img_meta (dict): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If rescale is False, then returned masks will fit the scale of imgs[0]. cfg (dict, optional): Config used in test phase. Defaults to None. Returns: :obj:`InstanceData`: Processed results of single image. it usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). """ cfg = self.test_cfg if cfg is None else cfg scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) img_h, img_w = img_meta['img_shape'][:2] ori_h, ori_w = img_meta['ori_shape'][:2] mask_preds = mask_preds.sigmoid().unsqueeze(0) mask_preds = aligned_bilinear(mask_preds, self.mask_out_stride) mask_preds = mask_preds[:, :, :img_h, :img_w] if rescale: # in-placed rescale the bboxes scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes /= scale_factor masks = F.interpolate( mask_preds, (ori_h, ori_w), mode='bilinear', align_corners=False).squeeze(0) > cfg.mask_thr else: masks = mask_preds.squeeze(0) > cfg.mask_thr return masks
53738
42.797066
79
py
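The controller/dynamic-conv mechanism in CondInstMaskHead above turns each instance's predicted parameter vector into the weights and biases of a tiny per-instance conv net, which is applied to the shared mask features via a grouped convolution. A minimal sketch with the default layer sizes (8 mask channels, 2 relative-coordinate channels, 3 dynamic layers, 169 params per instance) and random placeholder tensors:

import torch
import torch.nn.functional as F

num_insts, in_ch, feat_ch, H, W = 4, 8, 8, 32, 32
weight_nums = [(in_ch + 2) * feat_ch, feat_ch * feat_ch, feat_ch * 1]
bias_nums = [feat_ch, feat_ch, 1]
num_params = sum(weight_nums) + sum(bias_nums)  # 169, as in the head above

params = torch.randn(num_insts, num_params)             # one controller vector per instance
feats = torch.randn(1, num_insts * (in_ch + 2), H, W)   # rel-coords + mask feats, flattened

splits = list(torch.split(params, weight_nums + bias_nums, dim=1))
weights, biases = splits[:3], splits[3:]
# reshape so grouped conv applies each instance's filters to its own features
weights[0] = weights[0].reshape(num_insts * feat_ch, in_ch + 2, 1, 1)
weights[1] = weights[1].reshape(num_insts * feat_ch, feat_ch, 1, 1)
weights[2] = weights[2].reshape(num_insts * 1, feat_ch, 1, 1)
biases[0] = biases[0].reshape(num_insts * feat_ch)
biases[1] = biases[1].reshape(num_insts * feat_ch)
biases[2] = biases[2].reshape(num_insts)

x = feats
for i, (w, b) in enumerate(zip(weights, biases)):
    x = F.conv2d(x, w, bias=b, stride=1, padding=0, groups=num_insts)
    if i < 2:
        x = F.relu(x)
print(x.shape)  # (1, num_insts, H, W): one mask logit map per instance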
ERD
ERD-main/mmdet/models/test_time_augs/det_tta.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple

import torch
from mmcv.ops import batched_nms
from mmengine.model import BaseTTAModel
from mmengine.registry import MODELS
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.structures import DetDataSample
from mmdet.structures.bbox import bbox_flip


@MODELS.register_module()
class DetTTAModel(BaseTTAModel):
    """Merge augmented detection results, only bboxes corresponding score
    under flipping and multi-scale resizing can be processed now.

    Examples:
        >>> tta_model = dict(
        >>>     type='DetTTAModel',
        >>>     tta_cfg=dict(nms=dict(
        >>>                     type='nms',
        >>>                     iou_threshold=0.5),
        >>>                     max_per_img=100))
        >>>
        >>> tta_pipeline = [
        >>>     dict(type='LoadImageFromFile',
        >>>          backend_args=None),
        >>>     dict(
        >>>         type='TestTimeAug',
        >>>         transforms=[[
        >>>             dict(type='Resize',
        >>>                  scale=(1333, 800),
        >>>                  keep_ratio=True),
        >>>         ], [
        >>>             dict(type='RandomFlip', prob=1.),
        >>>             dict(type='RandomFlip', prob=0.)
        >>>         ], [
        >>>             dict(
        >>>                 type='PackDetInputs',
        >>>                 meta_keys=('img_id', 'img_path', 'ori_shape',
        >>>                            'img_shape', 'scale_factor', 'flip',
        >>>                            'flip_direction'))
        >>>         ]])]
    """

    def __init__(self, tta_cfg=None, **kwargs):
        super().__init__(**kwargs)
        self.tta_cfg = tta_cfg

    def merge_aug_bboxes(self, aug_bboxes: List[Tensor],
                         aug_scores: List[Tensor],
                         img_metas: List[str]) -> Tuple[Tensor, Tensor]:
        """Merge augmented detection bboxes and scores.

        Args:
            aug_bboxes (list[Tensor]): shape (n, 4*#class)
            aug_scores (list[Tensor] or None): shape (n, #class)

        Returns:
            tuple[Tensor]: ``bboxes`` with shape (n,4), where
            4 represent (tl_x, tl_y, br_x, br_y)
            and ``scores`` with shape (n,).
        """
        recovered_bboxes = []
        for bboxes, img_info in zip(aug_bboxes, img_metas):
            ori_shape = img_info['ori_shape']
            flip = img_info['flip']
            flip_direction = img_info['flip_direction']
            if flip:
                bboxes = bbox_flip(
                    bboxes=bboxes,
                    img_shape=ori_shape,
                    direction=flip_direction)
            recovered_bboxes.append(bboxes)
        bboxes = torch.cat(recovered_bboxes, dim=0)
        if aug_scores is None:
            return bboxes
        else:
            scores = torch.cat(aug_scores, dim=0)
            return bboxes, scores

    def merge_preds(self, data_samples_list: List[List[DetDataSample]]):
        """Merge batch predictions of enhanced data.

        Args:
            data_samples_list (List[List[DetDataSample]]): List of predictions
                of all enhanced data. The outer list indicates images, and the
                inner list corresponds to the different views of one image.
                Each element of the inner list is a ``DetDataSample``.

        Returns:
            List[DetDataSample]: Merged batch prediction.
        """
        merged_data_samples = []
        for data_samples in data_samples_list:
            merged_data_samples.append(self._merge_single_sample(data_samples))
        return merged_data_samples

    def _merge_single_sample(
            self, data_samples: List[DetDataSample]) -> DetDataSample:
        """Merge predictions which come form the different views of one image
        to one prediction.

        Args:
            data_samples (List[DetDataSample]): List of predictions
                of enhanced data which come form one image.

        Returns:
            List[DetDataSample]: Merged prediction.
        """
        aug_bboxes = []
        aug_scores = []
        aug_labels = []
        img_metas = []
        # TODO: support instance segmentation TTA
        assert data_samples[0].pred_instances.get('masks', None) is None, \
            'TTA of instance segmentation does not support now.'
        for data_sample in data_samples:
            aug_bboxes.append(data_sample.pred_instances.bboxes)
            aug_scores.append(data_sample.pred_instances.scores)
            aug_labels.append(data_sample.pred_instances.labels)
            img_metas.append(data_sample.metainfo)

        merged_bboxes, merged_scores = self.merge_aug_bboxes(
            aug_bboxes, aug_scores, img_metas)
        merged_labels = torch.cat(aug_labels, dim=0)

        if merged_bboxes.numel() == 0:
            return data_samples[0]

        det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores,
                                            merged_labels, self.tta_cfg.nms)

        det_bboxes = det_bboxes[:self.tta_cfg.max_per_img]
        det_labels = merged_labels[keep_idxs][:self.tta_cfg.max_per_img]

        results = InstanceData()
        _det_bboxes = det_bboxes.clone()
        results.bboxes = _det_bboxes[:, :-1]
        results.scores = _det_bboxes[:, -1]
        results.labels = det_labels
        det_results = data_samples[0]
        det_results.pred_instances = results
        return det_results
5,568
37.406897
79
py
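A toy illustration of the un-flipping step that merge_aug_bboxes relies on for horizontally flipped TTA views; the image size and box are made up, and the formula follows the usual (x1, y1, x2, y2) horizontal-flip convention rather than being copied from bbox_flip itself.

# Toy horizontal un-flip of (x1, y1, x2, y2) boxes, as used when merging TTA views.
import torch

img_shape = (480, 640)                            # (h, w), assumed
bboxes = torch.tensor([[100., 50., 200., 150.]])  # box predicted on the flipped image

unflipped = bboxes.clone()
unflipped[:, 0] = img_shape[1] - bboxes[:, 2]     # new x1 = w - x2
unflipped[:, 2] = img_shape[1] - bboxes[:, 0]     # new x2 = w - x1
print(unflipped)                                  # tensor([[440.,  50., 540., 150.]])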
ERD
ERD-main/mmdet/models/test_time_augs/merge_augs.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import warnings from typing import List, Optional, Union import numpy as np import torch from mmcv.ops import nms from mmengine.config import ConfigDict from torch import Tensor from mmdet.structures.bbox import bbox_mapping_back # TODO remove this, never be used in mmdet def merge_aug_proposals(aug_proposals, img_metas, cfg): """Merge augmented proposals (multiscale, flip, etc.) Args: aug_proposals (list[Tensor]): proposals from different testing schemes, shape (n, 5). Note that they are not rescaled to the original image size. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. cfg (dict): rpn test config. Returns: Tensor: shape (n, 4), proposals corresponding to original image scale. """ cfg = copy.deepcopy(cfg) # deprecate arguments warning if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: warnings.warn( 'In rpn_proposal or test_cfg, ' 'nms_thr has been moved to a dict named nms as ' 'iou_threshold, max_num has been renamed as max_per_img, ' 'name of original arguments and the way to specify ' 'iou_threshold of NMS will be deprecated.') if 'nms' not in cfg: cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) if 'max_num' in cfg: if 'max_per_img' in cfg: assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \ f'max_per_img at the same time, but get {cfg.max_num} ' \ f'and {cfg.max_per_img} respectively' \ f'Please delete max_num which will be deprecated.' else: cfg.max_per_img = cfg.max_num if 'nms_thr' in cfg: assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ f'iou_threshold in nms and ' \ f'nms_thr at the same time, but get ' \ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ f' respectively. Please delete the nms_thr ' \ f'which will be deprecated.' recovered_proposals = [] for proposals, img_info in zip(aug_proposals, img_metas): img_shape = img_info['img_shape'] scale_factor = img_info['scale_factor'] flip = img_info['flip'] flip_direction = img_info['flip_direction'] _proposals = proposals.clone() _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, scale_factor, flip, flip_direction) recovered_proposals.append(_proposals) aug_proposals = torch.cat(recovered_proposals, dim=0) merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(), aug_proposals[:, -1].contiguous(), cfg.nms.iou_threshold) scores = merged_proposals[:, 4] _, order = scores.sort(0, descending=True) num = min(cfg.max_per_img, merged_proposals.shape[0]) order = order[:num] merged_proposals = merged_proposals[order, :] return merged_proposals # TODO remove this, never be used in mmdet def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_shapes (list[Tensor]): shape (3, ). rcnn_test_cfg (dict): rcnn test config. 
Returns: tuple: (bboxes, scores) """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] flip_direction = img_info[0]['flip_direction'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.stack(recovered_bboxes).mean(dim=0) if aug_scores is None: return bboxes else: scores = torch.stack(aug_scores).mean(dim=0) return bboxes, scores def merge_aug_results(aug_batch_results, aug_batch_img_metas): """Merge augmented detection results, only bboxes corresponding score under flipping and multi-scale resizing can be processed now. Args: aug_batch_results (list[list[[obj:`InstanceData`]]): Detection results of multiple images with different augmentations. The outer list indicate the augmentation . The inter list indicate the batch dimension. Each item usually contains the following keys. - scores (Tensor): Classification scores, in shape (num_instance,) - labels (Tensor): Labels of bboxes, in shape (num_instances,). - bboxes (Tensor): In shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). aug_batch_img_metas (list[list[dict]]): The outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. Each dict in the list contains information of an image in the batch. Returns: batch_results (list[obj:`InstanceData`]): Same with the input `aug_results` except that all bboxes have been mapped to the original scale. """ num_augs = len(aug_batch_results) num_imgs = len(aug_batch_results[0]) batch_results = [] aug_batch_results = copy.deepcopy(aug_batch_results) for img_id in range(num_imgs): aug_results = [] for aug_id in range(num_augs): img_metas = aug_batch_img_metas[aug_id][img_id] results = aug_batch_results[aug_id][img_id] img_shape = img_metas['img_shape'] scale_factor = img_metas['scale_factor'] flip = img_metas['flip'] flip_direction = img_metas['flip_direction'] bboxes = bbox_mapping_back(results.bboxes, img_shape, scale_factor, flip, flip_direction) results.bboxes = bboxes aug_results.append(results) merged_aug_results = results.cat(aug_results) batch_results.append(merged_aug_results) return batch_results def merge_aug_scores(aug_scores): """Merge augmented bbox scores.""" if isinstance(aug_scores[0], torch.Tensor): return torch.mean(torch.stack(aug_scores), dim=0) else: return np.mean(aug_scores, axis=0) def merge_aug_masks(aug_masks: List[Tensor], img_metas: dict, weights: Optional[Union[list, Tensor]] = None) -> Tensor: """Merge augmented mask prediction. Args: aug_masks (list[Tensor]): each has shape (n, c, h, w). img_metas (dict): Image information. weights (list or Tensor): Weight of each aug_masks, the length should be n. 
    Returns:
        Tensor: has shape (n, c, h, w)
    """
    recovered_masks = []
    for i, mask in enumerate(aug_masks):
        if weights is not None:
            assert len(weights) == len(aug_masks)
            weight = weights[i]
        else:
            weight = 1
        flip = img_metas.get('flip', False)
        if flip:
            flip_direction = img_metas['flip_direction']
            if flip_direction == 'horizontal':
                mask = mask[:, :, :, ::-1]
            elif flip_direction == 'vertical':
                mask = mask[:, :, ::-1, :]
            elif flip_direction == 'diagonal':
                mask = mask[:, :, :, ::-1]
                mask = mask[:, :, ::-1, :]
            else:
                raise ValueError(
                    f"Invalid flipping direction '{flip_direction}'")
        recovered_masks.append(mask[None, :] * weight)

    merged_masks = torch.cat(recovered_masks, 0).mean(dim=0)
    if weights is not None:
        merged_masks = merged_masks * len(weights) / sum(weights)
    return merged_masks
8,469
37.5
79
py
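A self-contained sketch of the weighted averaging that merge_aug_masks performs (flip handling omitted); the number of views, the shapes and the weights are illustrative.

# Weighted mean of augmented mask predictions (n, c, h, w), illustrative values.
import torch

aug_masks = [torch.rand(2, 1, 28, 28) for _ in range(3)]   # three augmented views
weights = [1.0, 0.5, 0.5]                                   # assumed per-view weights

stacked = torch.stack([m * w for m, w in zip(aug_masks, weights)], dim=0)
merged = stacked.mean(dim=0) * len(weights) / sum(weights)  # renormalize as in the file
print(merged.shape)                                         # torch.Size([2, 1, 28, 28])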
ERD
ERD-main/mmdet/models/test_time_augs/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .det_tta import DetTTAModel
from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
                         merge_aug_proposals, merge_aug_results,
                         merge_aug_scores)

__all__ = [
    'merge_aug_bboxes', 'merge_aug_masks', 'merge_aug_proposals',
    'merge_aug_scores', 'merge_aug_results', 'DetTTAModel'
]
389
34.454545
65
py
ERD
ERD-main/mmdet/models/utils/gaussian_target.py
# Copyright (c) OpenMMLab. All rights reserved. from math import sqrt import torch import torch.nn.functional as F def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'): """Generate 2D gaussian kernel. Args: radius (int): Radius of gaussian kernel. sigma (int): Sigma of gaussian function. Default: 1. dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32. device (str): Device of gaussian tensor. Default: 'cpu'. Returns: h (Tensor): Gaussian kernel with a ``(2 * radius + 1) * (2 * radius + 1)`` shape. """ x = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(1, -1) y = torch.arange( -radius, radius + 1, dtype=dtype, device=device).view(-1, 1) h = (-(x * x + y * y) / (2 * sigma * sigma)).exp() h[h < torch.finfo(h.dtype).eps * h.max()] = 0 return h def gen_gaussian_target(heatmap, center, radius, k=1): """Generate 2D gaussian heatmap. Args: heatmap (Tensor): Input heatmap, the gaussian kernel will cover on it and maintain the max value. center (list[int]): Coord of gaussian kernel's center. radius (int): Radius of gaussian kernel. k (int): Coefficient of gaussian kernel. Default: 1. Returns: out_heatmap (Tensor): Updated heatmap covered by gaussian kernel. """ diameter = 2 * radius + 1 gaussian_kernel = gaussian2D( radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device) x, y = center height, width = heatmap.shape[:2] left, right = min(x, radius), min(width - x, radius + 1) top, bottom = min(y, radius), min(height - y, radius + 1) masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian_kernel[radius - top:radius + bottom, radius - left:radius + right] out_heatmap = heatmap torch.max( masked_heatmap, masked_gaussian * k, out=out_heatmap[y - top:y + bottom, x - left:x + right]) return out_heatmap def gaussian_radius(det_size, min_overlap): r"""Generate 2D gaussian radius. This function is modified from the `official github repo <https://github.com/princeton-vl/CornerNet-Lite/blob/master/core/sample/ utils.py#L65>`_. Given ``min_overlap``, radius could computed by a quadratic equation according to Vieta's formulas. There are 3 cases for computing gaussian radius, details are following: - Explanation of figure: ``lt`` and ``br`` indicates the left-top and bottom-right corner of ground truth box. ``x`` indicates the generated corner at the limited position when ``radius=r``. - Case1: one corner is inside the gt box and the other is outside. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x----------+--+ | | | | | | | | height | | overlap | | | | | | | | | | v +--+---------br--+ - | | | +----------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\ {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case2: both two corners are inside the gt box. .. code:: text |< width >| lt-+----------+ - | | | ^ +--x-------+ | | | | | | |overlap| | height | | | | | +-------x--+ | | | v +----------+-br - To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\ {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - Case3: both two corners are outside the gt box. .. 
code:: text |< width >| x--+----------------+ | | | +-lt-------------+ | - | | | | ^ | | | | | | overlap | | height | | | | | | | | v | +------------br--+ - | | | +----------------+--x To ensure IoU of generated box and gt box is larger than ``min_overlap``: .. math:: \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\ {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\ {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a} Args: det_size (list[int]): Shape of object. min_overlap (float): Min IoU with ground truth for boxes generated by keypoints inside the gaussian kernel. Returns: radius (int): Radius of gaussian kernel. """ height, width = det_size a1 = 1 b1 = (height + width) c1 = width * height * (1 - min_overlap) / (1 + min_overlap) sq1 = sqrt(b1**2 - 4 * a1 * c1) r1 = (b1 - sq1) / (2 * a1) a2 = 4 b2 = 2 * (height + width) c2 = (1 - min_overlap) * width * height sq2 = sqrt(b2**2 - 4 * a2 * c2) r2 = (b2 - sq2) / (2 * a2) a3 = 4 * min_overlap b3 = -2 * min_overlap * (height + width) c3 = (min_overlap - 1) * width * height sq3 = sqrt(b3**2 - 4 * a3 * c3) r3 = (b3 + sq3) / (2 * a3) return min(r1, r2, r3) def get_local_maximum(heat, kernel=3): """Extract local maximum pixel with given kernel. Args: heat (Tensor): Target heatmap. kernel (int): Kernel size of max pooling. Default: 3. Returns: heat (Tensor): A heatmap where local maximum pixels maintain its own value and other positions are 0. """ pad = (kernel - 1) // 2 hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad) keep = (hmax == heat).float() return heat * keep def get_topk_from_heatmap(scores, k=20): """Get top k positions from heatmap. Args: scores (Tensor): Target heatmap with shape [batch, num_classes, height, width]. k (int): Target number. Default: 20. Returns: tuple[torch.Tensor]: Scores, indexes, categories and coords of topk keypoint. Containing following Tensors: - topk_scores (Tensor): Max scores of each topk keypoint. - topk_inds (Tensor): Indexes of each topk keypoint. - topk_clses (Tensor): Categories of each topk keypoint. - topk_ys (Tensor): Y-coord of each topk keypoint. - topk_xs (Tensor): X-coord of each topk keypoint. """ batch, _, height, width = scores.size() topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k) topk_clses = topk_inds // (height * width) topk_inds = topk_inds % (height * width) topk_ys = topk_inds // width topk_xs = (topk_inds % width).int().float() return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs def gather_feat(feat, ind, mask=None): """Gather feature according to index. Args: feat (Tensor): Target feature map. ind (Tensor): Target coord index. mask (Tensor | None): Mask of feature map. Default: None. Returns: feat (Tensor): Gathered feature. """ dim = feat.size(2) ind = ind.unsqueeze(2).repeat(1, 1, dim) feat = feat.gather(1, ind) if mask is not None: mask = mask.unsqueeze(2).expand_as(feat) feat = feat[mask] feat = feat.view(-1, dim) return feat def transpose_and_gather_feat(feat, ind): """Transpose and gather feature according to index. Args: feat (Tensor): Target feature map. ind (Tensor): Target coord index. Returns: feat (Tensor): Transposed and gathered feature. """ feat = feat.permute(0, 2, 3, 1).contiguous() feat = feat.view(feat.size(0), -1, feat.size(3)) feat = gather_feat(feat, ind) return feat
8,393
30.204461
79
py
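A small usage sketch of gaussian_radius and gen_gaussian_target, assuming mmdet is importable; the heatmap size, box size and min_overlap are arbitrary example values.

# Draw one Gaussian peak on an empty heatmap; box size and min_overlap are made up.
import torch
from mmdet.models.utils import gaussian_radius, gen_gaussian_target

heatmap = torch.zeros(96, 96)
box_h, box_w = 24, 40
radius = max(0, int(gaussian_radius((box_h, box_w), min_overlap=0.3)))
center = [48, 48]                      # (x, y) of the object center on the heatmap
heatmap = gen_gaussian_target(heatmap, center, radius)
print(heatmap.max())                   # tensor(1.) at the center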
ERD
ERD-main/mmdet/models/utils/point_sample.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.ops import point_sample from torch import Tensor def get_uncertainty(mask_preds: Tensor, labels: Tensor) -> Tensor: """Estimate uncertainty based on pred logits. We estimate uncertainty as L1 distance between 0.0 and the logits prediction in 'mask_preds' for the foreground class in `classes`. Args: mask_preds (Tensor): mask predication logits, shape (num_rois, num_classes, mask_height, mask_width). labels (Tensor): Either predicted or ground truth label for each predicted mask, of length num_rois. Returns: scores (Tensor): Uncertainty scores with the most uncertain locations having the highest uncertainty score, shape (num_rois, 1, mask_height, mask_width) """ if mask_preds.shape[1] == 1: gt_class_logits = mask_preds.clone() else: inds = torch.arange(mask_preds.shape[0], device=mask_preds.device) gt_class_logits = mask_preds[inds, labels].unsqueeze(1) return -torch.abs(gt_class_logits) def get_uncertain_point_coords_with_randomness( mask_preds: Tensor, labels: Tensor, num_points: int, oversample_ratio: float, importance_sample_ratio: float) -> Tensor: """Get ``num_points`` most uncertain points with random points during train. Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties are calculated for each point using 'get_uncertainty()' function that takes point's logit prediction as input. Args: mask_preds (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. labels (Tensor): The ground truth class for each instance. num_points (int): The number of points to sample. oversample_ratio (float): Oversampling parameter. importance_sample_ratio (float): Ratio of points that are sampled via importnace sampling. Returns: point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains the coordinates sampled points. """ assert oversample_ratio >= 1 assert 0 <= importance_sample_ratio <= 1 batch_size = mask_preds.shape[0] num_sampled = int(num_points * oversample_ratio) point_coords = torch.rand( batch_size, num_sampled, 2, device=mask_preds.device) point_logits = point_sample(mask_preds, point_coords) # It is crucial to calculate uncertainty based on the sampled # prediction value for the points. Calculating uncertainties of the # coarse predictions first and sampling them for points leads to # incorrect results. To illustrate this: assume uncertainty func( # logits)=-abs(logits), a sampled point between two coarse # predictions with -1 and 1 logits has 0 logits, and therefore 0 # uncertainty value. However, if we calculate uncertainties for the # coarse predictions first, both will have -1 uncertainty, # and sampled point will get -1 uncertainty. point_uncertainties = get_uncertainty(point_logits, labels) num_uncertain_points = int(importance_sample_ratio * num_points) num_random_points = num_points - num_uncertain_points idx = torch.topk( point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] shift = num_sampled * torch.arange( batch_size, dtype=torch.long, device=mask_preds.device) idx += shift[:, None] point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( batch_size, num_uncertain_points, 2) if num_random_points > 0: rand_roi_coords = torch.rand( batch_size, num_random_points, 2, device=mask_preds.device) point_coords = torch.cat((point_coords, rand_roi_coords), dim=1) return point_coords
3,910
42.94382
75
py
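A minimal usage sketch of get_uncertain_point_coords_with_randomness, assuming mmdet and mmcv are installed; the logit tensor and the sampling ratios are toy values.

# Sample point coordinates biased toward uncertain mask logits (toy sizes).
import torch
from mmdet.models.utils import get_uncertain_point_coords_with_randomness

mask_logits = torch.randn(4, 1, 14, 14)      # 4 RoIs, class-agnostic logits
labels = torch.zeros(4, dtype=torch.long)    # per-RoI class (unused when class-agnostic)
coords = get_uncertain_point_coords_with_randomness(
    mask_logits, labels, num_points=12, oversample_ratio=3.0,
    importance_sample_ratio=0.75)
print(coords.shape)                          # torch.Size([4, 12, 2]), values in [0, 1]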
ERD
ERD-main/mmdet/models/utils/misc.py
# Copyright (c) OpenMMLab. All rights reserved. from functools import partial from typing import List, Sequence, Tuple, Union import numpy as np import torch from mmengine.structures import InstanceData from mmengine.utils import digit_version from six.moves import map, zip from torch import Tensor from torch.autograd import Function from torch.nn import functional as F from mmdet.structures import SampleList from mmdet.structures.bbox import BaseBoxes, get_box_type, stack_boxes from mmdet.structures.mask import BitmapMasks, PolygonMasks from mmdet.utils import OptInstanceList class SigmoidGeometricMean(Function): """Forward and backward function of geometric mean of two sigmoid functions. This implementation with analytical gradient function substitutes the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The original implementation incurs none during gradient backprapagation if both x and y are very small values. """ @staticmethod def forward(ctx, x, y): x_sigmoid = x.sigmoid() y_sigmoid = y.sigmoid() z = (x_sigmoid * y_sigmoid).sqrt() ctx.save_for_backward(x_sigmoid, y_sigmoid, z) return z @staticmethod def backward(ctx, grad_output): x_sigmoid, y_sigmoid, z = ctx.saved_tensors grad_x = grad_output * z * (1 - x_sigmoid) / 2 grad_y = grad_output * z * (1 - y_sigmoid) / 2 return grad_x, grad_y sigmoid_geometric_mean = SigmoidGeometricMean.apply def interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` to the shape of the `target`. The `source` must be a Tensor, but the `target` can be a Tensor or a np.ndarray with the shape (..., target_h, target_w). Args: source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or (N, C, H, W). target (Tensor | np.ndarray): The interpolation target with the shape (..., target_h, target_w). mode (str): Algorithm used for interpolation. The options are the same as those in F.interpolate(). Default: ``'bilinear'``. align_corners (bool): The same as the argument in F.interpolate(). Returns: Tensor: The interpolated source Tensor. """ assert len(target.shape) >= 2 def _interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` (4D) to the shape of the `target`.""" target_h, target_w = target.shape[-2:] source_h, source_w = source.shape[-2:] if target_h != source_h or target_w != source_w: source = F.interpolate( source, size=(target_h, target_w), mode=mode, align_corners=align_corners) return source if len(source.shape) == 3: source = source[:, None, :, :] source = _interpolate_as(source, target, mode, align_corners) return source[:, 0, :, :] else: return _interpolate_as(source, target, mode, align_corners) def unpack_gt_instances(batch_data_samples: SampleList) -> tuple: """Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based on ``batch_data_samples`` Args: batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple: - batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. - batch_gt_instances_ignore (list[:obj:`InstanceData`]): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. - batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. 
""" batch_gt_instances = [] batch_gt_instances_ignore = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) if 'ignored_instances' in data_sample: batch_gt_instances_ignore.append(data_sample.ignored_instances) else: batch_gt_instances_ignore.append(None) return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas def empty_instances(batch_img_metas: List[dict], device: torch.device, task_type: str, instance_results: OptInstanceList = None, mask_thr_binary: Union[int, float] = 0, box_type: Union[str, type] = 'hbox', use_box_type: bool = False, num_classes: int = 80, score_per_cls: bool = False) -> List[InstanceData]: """Handle predicted instances when RoI is empty. Note: If ``instance_results`` is not None, it will be modified in place internally, and then return ``instance_results`` Args: batch_img_metas (list[dict]): List of image information. device (torch.device): Device of tensor. task_type (str): Expected returned task type. it currently supports bbox and mask. instance_results (list[:obj:`InstanceData`]): List of instance results. mask_thr_binary (int, float): mask binarization threshold. Defaults to 0. box_type (str or type): The empty box type. Defaults to `hbox`. use_box_type (bool): Whether to warp boxes with the box type. Defaults to False. num_classes (int): num_classes of bbox_head. Defaults to 80. score_per_cls (bool): Whether to generate classwise score for the empty instance. ``score_per_cls`` will be True when the model needs to produce raw results without nms. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image """ assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \ f' but got {task_type}' if instance_results is not None: assert len(instance_results) == len(batch_img_metas) results_list = [] for img_id in range(len(batch_img_metas)): if instance_results is not None: results = instance_results[img_id] assert isinstance(results, InstanceData) else: results = InstanceData() if task_type == 'bbox': _, box_type = get_box_type(box_type) bboxes = torch.zeros(0, box_type.box_dim, device=device) if use_box_type: bboxes = box_type(bboxes, clone=False) results.bboxes = bboxes score_shape = (0, num_classes + 1) if score_per_cls else (0, ) results.scores = torch.zeros(score_shape, device=device) results.labels = torch.zeros((0, ), device=device, dtype=torch.long) else: # TODO: Handle the case where rescale is false img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2] # the type of `im_mask` will be torch.bool or torch.uint8, # where uint8 if for visualization and debugging. im_mask = torch.zeros( 0, img_h, img_w, device=device, dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8) results.masks = im_mask results_list.append(results) return results_list def multi_apply(func, *args, **kwargs): """Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and map the multiple outputs of the ``func`` into different list. Each list contains the same type of outputs corresponding to different inputs. 
Args: func (Function): A function that will be applied to a list of arguments Returns: tuple(list): A tuple containing multiple list, each list contains \ a kind of returned results by the function """ pfunc = partial(func, **kwargs) if kwargs else func map_results = map(pfunc, *args) return tuple(map(list, zip(*map_results))) def unmap(data, count, inds, fill=0): """Unmap a subset of item (data) back to the original set of items (of size count)""" if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds.type(torch.bool)] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds.type(torch.bool), :] = data return ret def mask2ndarray(mask): """Convert Mask to ndarray.. Args: mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or torch.Tensor or np.ndarray): The mask to be converted. Returns: np.ndarray: Ndarray mask of shape (n, h, w) that has been converted """ if isinstance(mask, (BitmapMasks, PolygonMasks)): mask = mask.to_ndarray() elif isinstance(mask, torch.Tensor): mask = mask.detach().cpu().numpy() elif not isinstance(mask, np.ndarray): raise TypeError(f'Unsupported {type(mask)} data type') return mask def flip_tensor(src_tensor, flip_direction): """flip tensor base on flip_direction. Args: src_tensor (Tensor): input feature map, shape (B, C, H, W). flip_direction (str): The flipping direction. Options are 'horizontal', 'vertical', 'diagonal'. Returns: out_tensor (Tensor): Flipped tensor. """ assert src_tensor.ndim == 4 valid_directions = ['horizontal', 'vertical', 'diagonal'] assert flip_direction in valid_directions if flip_direction == 'horizontal': out_tensor = torch.flip(src_tensor, [3]) elif flip_direction == 'vertical': out_tensor = torch.flip(src_tensor, [2]) else: out_tensor = torch.flip(src_tensor, [2, 3]) return out_tensor def select_single_mlvl(mlvl_tensors, batch_id, detach=True): """Extract a multi-scale single image tensor from a multi-scale batch tensor based on batch index. Note: The default value of detach is True, because the proposal gradient needs to be detached during the training of the two-stage model. E.g Cascade Mask R-CNN. Args: mlvl_tensors (list[Tensor]): Batch tensor for all scale levels, each is a 4D-tensor. batch_id (int): Batch index. detach (bool): Whether detach gradient. Default True. Returns: list[Tensor]: Multi-scale single image tensor. """ assert isinstance(mlvl_tensors, (list, tuple)) num_levels = len(mlvl_tensors) if detach: mlvl_tensor_list = [ mlvl_tensors[i][batch_id].detach() for i in range(num_levels) ] else: mlvl_tensor_list = [ mlvl_tensors[i][batch_id] for i in range(num_levels) ] return mlvl_tensor_list def filter_scores_and_topk(scores, score_thr, topk, results=None): """Filter results using score threshold and topk candidates. Args: scores (Tensor): The scores, shape (num_bboxes, K). score_thr (float): The score filter threshold. topk (int): The number of topk candidates. results (dict or list or Tensor, Optional): The results to which the filtering rule is to be applied. The shape of each item is (num_bboxes, N). Returns: tuple: Filtered results - scores (Tensor): The scores after being filtered, \ shape (num_bboxes_filtered, ). - labels (Tensor): The class labels, shape \ (num_bboxes_filtered, ). - anchor_idxs (Tensor): The anchor indexes, shape \ (num_bboxes_filtered, ). - filtered_results (dict or list or Tensor, Optional): \ The filtered results. The shape of each item is \ (num_bboxes_filtered, N). 
""" valid_mask = scores > score_thr scores = scores[valid_mask] valid_idxs = torch.nonzero(valid_mask) num_topk = min(topk, valid_idxs.size(0)) # torch.sort is actually faster than .topk (at least on GPUs) scores, idxs = scores.sort(descending=True) scores = scores[:num_topk] topk_idxs = valid_idxs[idxs[:num_topk]] keep_idxs, labels = topk_idxs.unbind(dim=1) filtered_results = None if results is not None: if isinstance(results, dict): filtered_results = {k: v[keep_idxs] for k, v in results.items()} elif isinstance(results, list): filtered_results = [result[keep_idxs] for result in results] elif isinstance(results, torch.Tensor): filtered_results = results[keep_idxs] else: raise NotImplementedError(f'Only supports dict or list or Tensor, ' f'but get {type(results)}.') return scores, labels, keep_idxs, filtered_results def center_of_mass(mask, esp=1e-6): """Calculate the centroid coordinates of the mask. Args: mask (Tensor): The mask to be calculated, shape (h, w). esp (float): Avoid dividing by zero. Default: 1e-6. Returns: tuple[Tensor]: the coordinates of the center point of the mask. - center_h (Tensor): the center point of the height. - center_w (Tensor): the center point of the width. """ h, w = mask.shape grid_h = torch.arange(h, device=mask.device)[:, None] grid_w = torch.arange(w, device=mask.device) normalizer = mask.sum().float().clamp(min=esp) center_h = (mask * grid_h).sum() / normalizer center_w = (mask * grid_w).sum() / normalizer return center_h, center_w def generate_coordinate(featmap_sizes, device='cuda'): """Generate the coordinate. Args: featmap_sizes (tuple): The feature to be calculated, of shape (N, C, W, H). device (str): The device where the feature will be put on. Returns: coord_feat (Tensor): The coordinate feature, of shape (N, 2, W, H). """ x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device) y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device) y, x = torch.meshgrid(y_range, x_range) y = y.expand([featmap_sizes[0], 1, -1, -1]) x = x.expand([featmap_sizes[0], 1, -1, -1]) coord_feat = torch.cat([x, y], 1) return coord_feat def levels_to_images(mlvl_tensor: List[torch.Tensor]) -> List[torch.Tensor]: """Concat multi-level feature maps by image. [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] Convert the shape of each element in mlvl_tensor from (N, C, H, W) to (N, H*W , C), then split the element to N elements with shape (H*W, C), and concat elements in same image of all level along first dimension. Args: mlvl_tensor (list[Tensor]): list of Tensor which collect from corresponding level. Each element is of shape (N, C, H, W) Returns: list[Tensor]: A list that contains N tensors and each tensor is of shape (num_elements, C) """ batch_size = mlvl_tensor[0].size(0) batch_list = [[] for _ in range(batch_size)] channels = mlvl_tensor[0].size(1) for t in mlvl_tensor: t = t.permute(0, 2, 3, 1) t = t.view(batch_size, -1, channels).contiguous() for img in range(batch_size): batch_list[img].append(t[img]) return [torch.cat(item, 0) for item in batch_list] def images_to_levels(target, num_levels): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] 
""" target = stack_boxes(target, 0) level_targets = [] start = 0 for n in num_levels: end = start + n # level_targets.append(target[:, start:end].squeeze(0)) level_targets.append(target[:, start:end]) start = end return level_targets def samplelist_boxtype2tensor(batch_data_samples: SampleList) -> SampleList: for data_samples in batch_data_samples: if 'gt_instances' in data_samples: bboxes = data_samples.gt_instances.get('bboxes', None) if isinstance(bboxes, BaseBoxes): data_samples.gt_instances.bboxes = bboxes.tensor if 'pred_instances' in data_samples: bboxes = data_samples.pred_instances.get('bboxes', None) if isinstance(bboxes, BaseBoxes): data_samples.pred_instances.bboxes = bboxes.tensor if 'ignored_instances' in data_samples: bboxes = data_samples.ignored_instances.get('bboxes', None) if isinstance(bboxes, BaseBoxes): data_samples.ignored_instances.bboxes = bboxes.tensor _torch_version_div_indexing = ( 'parrots' not in torch.__version__ and digit_version(torch.__version__) >= digit_version('1.8')) def floordiv(dividend, divisor, rounding_mode='trunc'): if _torch_version_div_indexing: return torch.div(dividend, divisor, rounding_mode=rounding_mode) else: return dividend // divisor def _filter_gt_instances_by_score(batch_data_samples: SampleList, score_thr: float) -> SampleList: """Filter ground truth (GT) instances by score. Args: batch_data_samples (SampleList): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. score_thr (float): The score filter threshold. Returns: SampleList: The Data Samples filtered by score. """ for data_samples in batch_data_samples: assert 'scores' in data_samples.gt_instances, \ 'there does not exit scores in instances' if data_samples.gt_instances.bboxes.shape[0] > 0: data_samples.gt_instances = data_samples.gt_instances[ data_samples.gt_instances.scores > score_thr] return batch_data_samples def _filter_gt_instances_by_size(batch_data_samples: SampleList, wh_thr: tuple) -> SampleList: """Filter ground truth (GT) instances by size. Args: batch_data_samples (SampleList): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. wh_thr (tuple): Minimum width and height of bbox. Returns: SampleList: The Data Samples filtered by score. """ for data_samples in batch_data_samples: bboxes = data_samples.gt_instances.bboxes if bboxes.shape[0] > 0: w = bboxes[:, 2] - bboxes[:, 0] h = bboxes[:, 3] - bboxes[:, 1] data_samples.gt_instances = data_samples.gt_instances[ (w > wh_thr[0]) & (h > wh_thr[1])] return batch_data_samples def filter_gt_instances(batch_data_samples: SampleList, score_thr: float = None, wh_thr: tuple = None): """Filter ground truth (GT) instances by score and/or size. Args: batch_data_samples (SampleList): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. score_thr (float): The score filter threshold. wh_thr (tuple): Minimum width and height of bbox. Returns: SampleList: The Data Samples filtered by score and/or size. """ if score_thr is not None: batch_data_samples = _filter_gt_instances_by_score( batch_data_samples, score_thr) if wh_thr is not None: batch_data_samples = _filter_gt_instances_by_size( batch_data_samples, wh_thr) return batch_data_samples def rename_loss_dict(prefix: str, losses: dict) -> dict: """Rename the key names in loss dict by adding a prefix. Args: prefix (str): The prefix for loss components. losses (dict): A dictionary of loss components. 
Returns: dict: A dictionary of loss components with prefix. """ return {prefix + k: v for k, v in losses.items()} def reweight_loss_dict(losses: dict, weight: float) -> dict: """Reweight losses in the dict by weight. Args: losses (dict): A dictionary of loss components. weight (float): Weight for loss components. Returns: dict: A dictionary of weighted loss components. """ for name, loss in losses.items(): if 'loss' in name: if isinstance(loss, Sequence): losses[name] = [item * weight for item in loss] else: losses[name] = loss * weight return losses def relative_coordinate_maps( locations: Tensor, centers: Tensor, strides: Tensor, size_of_interest: int, feat_sizes: Tuple[int], ) -> Tensor: """Generate the relative coordinate maps with feat_stride. Args: locations (Tensor): The prior location of mask feature map. It has shape (num_priors, 2). centers (Tensor): The prior points of a object in all feature pyramid. It has shape (num_pos, 2) strides (Tensor): The prior strides of a object in all feature pyramid. It has shape (num_pos, 1) size_of_interest (int): The size of the region used in rel coord. feat_sizes (Tuple[int]): The feature size H and W, which has 2 dims. Returns: rel_coord_feat (Tensor): The coordinate feature of shape (num_pos, 2, H, W). """ H, W = feat_sizes rel_coordinates = centers.reshape(-1, 1, 2) - locations.reshape(1, -1, 2) rel_coordinates = rel_coordinates.permute(0, 2, 1).float() rel_coordinates = rel_coordinates / ( strides[:, None, None] * size_of_interest) return rel_coordinates.reshape(-1, 2, H, W) def aligned_bilinear(tensor: Tensor, factor: int) -> Tensor: """aligned bilinear, used in original implement in CondInst: https://github.com/aim-uofa/AdelaiDet/blob/\ c0b2092ce72442b0f40972f7c6dda8bb52c46d16/adet/utils/comm.py#L23 """ assert tensor.dim() == 4 assert factor >= 1 assert int(factor) == factor if factor == 1: return tensor h, w = tensor.size()[2:] tensor = F.pad(tensor, pad=(0, 1, 0, 1), mode='replicate') oh = factor * h + 1 ow = factor * w + 1 tensor = F.interpolate( tensor, size=(oh, ow), mode='bilinear', align_corners=True) tensor = F.pad( tensor, pad=(factor // 2, 0, factor // 2, 0), mode='replicate') return tensor[:, :, :oh - 1, :ow - 1] def unfold_wo_center(x, kernel_size: int, dilation: int) -> Tensor: """unfold_wo_center, used in original implement in BoxInst: https://github.com/aim-uofa/AdelaiDet/blob/\ 4a3a1f7372c35b48ebf5f6adc59f135a0fa28d60/\ adet/modeling/condinst/condinst.py#L53 """ assert x.dim() == 4 assert kernel_size % 2 == 1 # using SAME padding padding = (kernel_size + (dilation - 1) * (kernel_size - 1)) // 2 unfolded_x = F.unfold( x, kernel_size=kernel_size, padding=padding, dilation=dilation) unfolded_x = unfolded_x.reshape( x.size(0), x.size(1), -1, x.size(2), x.size(3)) # remove the center pixels size = kernel_size**2 unfolded_x = torch.cat( (unfolded_x[:, :, :size // 2], unfolded_x[:, :, size // 2 + 1:]), dim=2) return unfolded_x
23,814
35.470138
79
py
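Two of the helpers above, multi_apply and unmap, are used throughout the detection heads; a small self-contained usage sketch follows, assuming mmdet is importable (the per-image function and the toy tensors are made up).

# multi_apply maps a per-image function over lists and regroups the outputs.
import torch
from mmdet.models.utils import multi_apply, unmap

def per_image(scores, bias):
    return scores + bias, scores.numel()     # two outputs per call

scores_list = [torch.ones(3), torch.ones(5)]
shifted, counts = multi_apply(per_image, scores_list, bias=1.0)
print(counts)                                 # [3, 5]

# unmap scatters a subset of values back into a full-sized tensor.
full = unmap(torch.tensor([7., 8.]), count=5,
             inds=torch.tensor([0, 1, 0, 1, 0], dtype=torch.bool), fill=0)
print(full)                                   # tensor([0., 7., 0., 8., 0.])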
ERD
ERD-main/mmdet/models/utils/panoptic_gt_processing.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Tuple import torch from torch import Tensor def preprocess_panoptic_gt(gt_labels: Tensor, gt_masks: Tensor, gt_semantic_seg: Tensor, num_things: int, num_stuff: int) -> Tuple[Tensor, Tensor]: """Preprocess the ground truth for a image. Args: gt_labels (Tensor): Ground truth labels of each bbox, with shape (num_gts, ). gt_masks (BitmapMasks): Ground truth masks of each instances of a image, shape (num_gts, h, w). gt_semantic_seg (Tensor | None): Ground truth of semantic segmentation with the shape (1, h, w). [0, num_thing_class - 1] means things, [num_thing_class, num_class-1] means stuff, 255 means VOID. It's None when training instance segmentation. Returns: tuple[Tensor, Tensor]: a tuple containing the following targets. - labels (Tensor): Ground truth class indices for a image, with shape (n, ), n is the sum of number of stuff type and number of instance in a image. - masks (Tensor): Ground truth mask for a image, with shape (n, h, w). Contains stuff and things when training panoptic segmentation, and things only when training instance segmentation. """ num_classes = num_things + num_stuff things_masks = gt_masks.to_tensor( dtype=torch.bool, device=gt_labels.device) if gt_semantic_seg is None: masks = things_masks.long() return gt_labels, masks things_labels = gt_labels gt_semantic_seg = gt_semantic_seg.squeeze(0) semantic_labels = torch.unique( gt_semantic_seg, sorted=False, return_inverse=False, return_counts=False) stuff_masks_list = [] stuff_labels_list = [] for label in semantic_labels: if label < num_things or label >= num_classes: continue stuff_mask = gt_semantic_seg == label stuff_masks_list.append(stuff_mask) stuff_labels_list.append(label) if len(stuff_masks_list) > 0: stuff_masks = torch.stack(stuff_masks_list, dim=0) stuff_labels = torch.stack(stuff_labels_list, dim=0) labels = torch.cat([things_labels, stuff_labels], dim=0) masks = torch.cat([things_masks, stuff_masks], dim=0) else: labels = things_labels masks = things_masks masks = masks.long() return labels, masks
2,575
35.28169
74
py
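A toy call to preprocess_panoptic_gt, assuming mmdet is importable; the label ids, mask contents and the 2-things / 2-stuff split are invented for illustration.

# Build panoptic targets from toy instance masks and a toy semantic map.
import numpy as np
import torch
from mmdet.models.utils import preprocess_panoptic_gt
from mmdet.structures.mask import BitmapMasks

h, w = 8, 8
gt_labels = torch.tensor([0, 1])                       # two "thing" instances
gt_masks = BitmapMasks(np.random.rand(2, h, w) > 0.5, h, w)
gt_semantic_seg = torch.full((1, h, w), 255, dtype=torch.long)
gt_semantic_seg[0, :4] = 2                             # one "stuff" region, class id 2
labels, masks = preprocess_panoptic_gt(
    gt_labels, gt_masks, gt_semantic_seg, num_things=2, num_stuff=2)
print(labels, masks.shape)                             # 3 targets: 2 things + 1 stuff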
ERD
ERD-main/mmdet/models/utils/make_divisible.py
# Copyright (c) OpenMMLab. All rights reserved.
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Make divisible function.

    This function rounds the channel number to the nearest value that can be
    divisible by the divisor. It is taken from the original tf repo. It ensures
    that all layers have a channel number that is divisible by divisor. It can
    be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py  # noqa

    Args:
        value (int): The original channel number.
        divisor (int): The divisor to fully divide the channel number.
        min_value (int): The minimum value of the output channel.
            Default: None, means that the minimum value equal to the divisor.
        min_ratio (float): The minimum ratio of the rounded channel number to
            the original channel number. Default: 0.9.

    Returns:
        int: The modified output channel number.
    """
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than (1-min_ratio).
    if new_value < min_ratio * value:
        new_value += divisor
    return new_value
1,279
43.137931
116
py
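A few hedged examples of make_divisible with divisor 8; the input channel counts are arbitrary and assume mmdet is importable.

# Round channel counts to multiples of 8 without shrinking by more than 10%.
from mmdet.models.utils import make_divisible

print(make_divisible(32, 8))    # 32
print(make_divisible(37, 8))    # 40
print(make_divisible(10, 8))    # 16  (8 would be < 0.9 * 10, so the divisor is added back)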
ERD
ERD-main/mmdet/models/utils/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
                              gen_gaussian_target, get_local_maximum,
                              get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
                   filter_gt_instances, filter_scores_and_topk, flip_tensor,
                   generate_coordinate, images_to_levels, interpolate_as,
                   levels_to_images, mask2ndarray, multi_apply,
                   relative_coordinate_maps, rename_loss_dict,
                   reweight_loss_dict, samplelist_boxtype2tensor,
                   select_single_mlvl, sigmoid_geometric_mean,
                   unfold_wo_center, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
                           get_uncertainty)

__all__ = [
    'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
    'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
    'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
    'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
    'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
    'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
    'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
    'select_single_mlvl', 'unmap', 'images_to_levels',
    'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
    'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
    'unfold_wo_center'
]
1,748
55.419355
79
py
ERD
ERD-main/mmdet/models/task_modules/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import *  # noqa: F401,F403
from .builder import (ANCHOR_GENERATORS, BBOX_ASSIGNERS, BBOX_CODERS,
                      BBOX_SAMPLERS, IOU_CALCULATORS, MATCH_COSTS,
                      PRIOR_GENERATORS, build_anchor_generator,
                      build_assigner, build_bbox_coder, build_iou_calculator,
                      build_match_cost, build_prior_generator, build_sampler)
from .coders import *  # noqa: F401,F403
from .prior_generators import *  # noqa: F401,F403
from .samplers import *  # noqa: F401,F403

__all__ = [
    'ANCHOR_GENERATORS', 'PRIOR_GENERATORS', 'BBOX_ASSIGNERS', 'BBOX_SAMPLERS',
    'MATCH_COSTS', 'BBOX_CODERS', 'IOU_CALCULATORS', 'build_anchor_generator',
    'build_prior_generator', 'build_assigner', 'build_sampler',
    'build_iou_calculator', 'build_match_cost', 'build_bbox_coder'
]
889
48.444444
79
py
ERD
ERD-main/mmdet/models/task_modules/builder.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

from mmdet.registry import TASK_UTILS

PRIOR_GENERATORS = TASK_UTILS
ANCHOR_GENERATORS = TASK_UTILS
BBOX_ASSIGNERS = TASK_UTILS
BBOX_SAMPLERS = TASK_UTILS
BBOX_CODERS = TASK_UTILS
MATCH_COSTS = TASK_UTILS
IOU_CALCULATORS = TASK_UTILS


def build_bbox_coder(cfg, **default_args):
    """Builder of box coder."""
    warnings.warn('``build_bbox_coder`` would be deprecated soon, please use '
                  '``mmdet.registry.TASK_UTILS.build()`` ')
    return TASK_UTILS.build(cfg, default_args=default_args)


def build_iou_calculator(cfg, default_args=None):
    """Builder of IoU calculator."""
    warnings.warn(
        '``build_iou_calculator`` would be deprecated soon, please use '
        '``mmdet.registry.TASK_UTILS.build()`` ')
    return TASK_UTILS.build(cfg, default_args=default_args)


def build_match_cost(cfg, default_args=None):
    """Builder of match cost."""
    warnings.warn('``build_match_cost`` would be deprecated soon, please use '
                  '``mmdet.registry.TASK_UTILS.build()`` ')
    return TASK_UTILS.build(cfg, default_args=default_args)


def build_assigner(cfg, **default_args):
    """Builder of box assigner."""
    warnings.warn('``build_assigner`` would be deprecated soon, please use '
                  '``mmdet.registry.TASK_UTILS.build()`` ')
    return TASK_UTILS.build(cfg, default_args=default_args)


def build_sampler(cfg, **default_args):
    """Builder of box sampler."""
    warnings.warn('``build_sampler`` would be deprecated soon, please use '
                  '``mmdet.registry.TASK_UTILS.build()`` ')
    return TASK_UTILS.build(cfg, default_args=default_args)


def build_prior_generator(cfg, default_args=None):
    warnings.warn(
        '``build_prior_generator`` would be deprecated soon, please use '
        '``mmdet.registry.TASK_UTILS.build()`` ')
    return TASK_UTILS.build(cfg, default_args=default_args)


def build_anchor_generator(cfg, default_args=None):
    warnings.warn(
        '``build_anchor_generator`` would be deprecated soon, please use '
        '``mmdet.registry.TASK_UTILS.build()`` ')
    return TASK_UTILS.build(cfg, default_args=default_args)
2,208
34.063492
78
py
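A short sketch contrasting a deprecated builder with the registry call it forwards to; it assumes mmdet is importable and that BboxOverlaps2D is registered once the task-modules package has been imported.

# Both calls construct the same BboxOverlaps2D object; the wrapper only adds a
# deprecation warning.
from mmdet.models.task_modules.builder import build_iou_calculator
from mmdet.registry import TASK_UTILS

cfg = dict(type='BboxOverlaps2D')
legacy = build_iou_calculator(cfg)          # emits the deprecation warning above
preferred = TASK_UTILS.build(cfg)           # recommended replacement
print(type(legacy) is type(preferred))      # True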
ERD
ERD-main/mmdet/models/task_modules/assigners/assign_result.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from torch import Tensor from mmdet.utils import util_mixins class AssignResult(util_mixins.NiceRepr): """Stores assignments between predicted and truth boxes. Attributes: num_gts (int): the number of truth boxes considered when computing this assignment gt_inds (Tensor): for each predicted box indicates the 1-based index of the assigned truth box. 0 means unassigned and -1 means ignore. max_overlaps (Tensor): the iou between the predicted box and its assigned truth box. labels (Tensor): If specified, for each predicted box indicates the category label of the assigned truth box. Example: >>> # An assign result between 4 predicted boxes and 9 true boxes >>> # where only two boxes were assigned. >>> num_gts = 9 >>> max_overlaps = torch.LongTensor([0, .5, .9, 0]) >>> gt_inds = torch.LongTensor([-1, 1, 2, 0]) >>> labels = torch.LongTensor([0, 3, 4, 0]) >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels) >>> print(str(self)) # xdoctest: +IGNORE_WANT <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,), labels.shape=(4,))> >>> # Force addition of gt labels (when adding gt as proposals) >>> new_labels = torch.LongTensor([3, 4, 5]) >>> self.add_gt_(new_labels) >>> print(str(self)) # xdoctest: +IGNORE_WANT <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,), labels.shape=(7,))> """ def __init__(self, num_gts: int, gt_inds: Tensor, max_overlaps: Tensor, labels: Tensor) -> None: self.num_gts = num_gts self.gt_inds = gt_inds self.max_overlaps = max_overlaps self.labels = labels # Interface for possible user-defined properties self._extra_properties = {} @property def num_preds(self): """int: the number of predictions in this assignment""" return len(self.gt_inds) def set_extra_property(self, key, value): """Set user-defined new property.""" assert key not in self.info self._extra_properties[key] = value def get_extra_property(self, key): """Get user-defined property.""" return self._extra_properties.get(key, None) @property def info(self): """dict: a dictionary of info about the object""" basic_info = { 'num_gts': self.num_gts, 'num_preds': self.num_preds, 'gt_inds': self.gt_inds, 'max_overlaps': self.max_overlaps, 'labels': self.labels, } basic_info.update(self._extra_properties) return basic_info def __nice__(self): """str: a "nice" summary string describing this assign result""" parts = [] parts.append(f'num_gts={self.num_gts!r}') if self.gt_inds is None: parts.append(f'gt_inds={self.gt_inds!r}') else: parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}') if self.max_overlaps is None: parts.append(f'max_overlaps={self.max_overlaps!r}') else: parts.append('max_overlaps.shape=' f'{tuple(self.max_overlaps.shape)!r}') if self.labels is None: parts.append(f'labels={self.labels!r}') else: parts.append(f'labels.shape={tuple(self.labels.shape)!r}') return ', '.join(parts) @classmethod def random(cls, **kwargs): """Create random AssignResult for tests or debugging. Args: num_preds: number of predicted boxes num_gts: number of true boxes p_ignore (float): probability of a predicted box assigned to an ignored truth p_assigned (float): probability of a predicted box not being assigned p_use_label (float | bool): with labels or not rng (None | int | numpy.random.RandomState): seed or state Returns: :obj:`AssignResult`: Randomly generated assign results. 
Example: >>> from mmdet.models.task_modules.assigners.assign_result import * # NOQA >>> self = AssignResult.random() >>> print(self.info) """ from ..samplers.sampling_result import ensure_rng rng = ensure_rng(kwargs.get('rng', None)) num_gts = kwargs.get('num_gts', None) num_preds = kwargs.get('num_preds', None) p_ignore = kwargs.get('p_ignore', 0.3) p_assigned = kwargs.get('p_assigned', 0.7) num_classes = kwargs.get('num_classes', 3) if num_gts is None: num_gts = rng.randint(0, 8) if num_preds is None: num_preds = rng.randint(0, 16) if num_gts == 0: max_overlaps = torch.zeros(num_preds, dtype=torch.float32) gt_inds = torch.zeros(num_preds, dtype=torch.int64) labels = torch.zeros(num_preds, dtype=torch.int64) else: import numpy as np # Create an overlap for each predicted box max_overlaps = torch.from_numpy(rng.rand(num_preds)) # Construct gt_inds for each predicted box is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned) # maximum number of assignments constraints n_assigned = min(num_preds, min(num_gts, is_assigned.sum())) assigned_idxs = np.where(is_assigned)[0] rng.shuffle(assigned_idxs) assigned_idxs = assigned_idxs[0:n_assigned] assigned_idxs.sort() is_assigned[:] = 0 is_assigned[assigned_idxs] = True is_ignore = torch.from_numpy( rng.rand(num_preds) < p_ignore) & is_assigned gt_inds = torch.zeros(num_preds, dtype=torch.int64) true_idxs = np.arange(num_gts) rng.shuffle(true_idxs) true_idxs = torch.from_numpy(true_idxs) gt_inds[is_assigned] = true_idxs[:n_assigned].long() gt_inds = torch.from_numpy( rng.randint(1, num_gts + 1, size=num_preds)) gt_inds[is_ignore] = -1 gt_inds[~is_assigned] = 0 max_overlaps[~is_assigned] = 0 if num_classes == 0: labels = torch.zeros(num_preds, dtype=torch.int64) else: labels = torch.from_numpy( # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class rng.randint(0, num_classes, size=num_preds)) labels[~is_assigned] = 0 self = cls(num_gts, gt_inds, max_overlaps, labels) return self def add_gt_(self, gt_labels): """Add ground truth as assigned results. Args: gt_labels (torch.Tensor): Labels of gt boxes """ self_inds = torch.arange( 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) self.gt_inds = torch.cat([self_inds, self.gt_inds]) self.max_overlaps = torch.cat( [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]) self.labels = torch.cat([gt_labels, self.labels])
7,470
36.542714
87
py
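A short usage sketch of AssignResult.random and add_gt_, assuming mmdet is importable; the sizes and the rng seed are arbitrary.

# Inspect a randomly generated assignment and append ground-truth self-assignments.
import torch
from mmdet.models.task_modules.assigners.assign_result import AssignResult

result = AssignResult.random(num_preds=6, num_gts=3, rng=0)
print(result.num_preds, result.num_gts)      # 6 3
result.add_gt_(torch.tensor([0, 1, 2]))      # prepend one entry per gt box
print(result.num_preds)                      # 9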
ERD
ERD-main/mmdet/models/task_modules/assigners/dynamic_soft_label_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple import torch import torch.nn.functional as F from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import BaseBoxes from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner INF = 100000000 EPS = 1.0e-7 def center_of_mass(masks: Tensor, eps: float = 1e-7) -> Tensor: """Compute the masks center of mass. Args: masks: Mask tensor, has shape (num_masks, H, W). eps: a small number to avoid normalizer to be zero. Defaults to 1e-7. Returns: Tensor: The masks center of mass. Has shape (num_masks, 2). """ n, h, w = masks.shape grid_h = torch.arange(h, device=masks.device)[:, None] grid_w = torch.arange(w, device=masks.device) normalizer = masks.sum(dim=(1, 2)).float().clamp(min=eps) center_y = (masks * grid_h).sum(dim=(1, 2)) / normalizer center_x = (masks * grid_w).sum(dim=(1, 2)) / normalizer center = torch.cat([center_x[:, None], center_y[:, None]], dim=1) return center @TASK_UTILS.register_module() class DynamicSoftLabelAssigner(BaseAssigner): """Computes matching between predictions and ground truth with dynamic soft label assignment. Args: soft_center_radius (float): Radius of the soft center prior. Defaults to 3.0. topk (int): Select top-k predictions to calculate dynamic k best matches for each gt. Defaults to 13. iou_weight (float): The scale factor of iou cost. Defaults to 3.0. iou_calculator (ConfigType): Config of overlaps Calculator. Defaults to dict(type='BboxOverlaps2D'). """ def __init__( self, soft_center_radius: float = 3.0, topk: int = 13, iou_weight: float = 3.0, iou_calculator: ConfigType = dict(type='BboxOverlaps2D') ) -> None: self.soft_center_radius = soft_center_radius self.topk = topk self.iou_weight = iou_weight self.iou_calculator = TASK_UTILS.build(iou_calculator) def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to priors. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: obj:`AssignResult`: The assigned result. 
""" gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels num_gt = gt_bboxes.size(0) decoded_bboxes = pred_instances.bboxes pred_scores = pred_instances.scores priors = pred_instances.priors num_bboxes = decoded_bboxes.size(0) # assign 0 by default assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), 0, dtype=torch.long) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) if num_gt == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 assigned_labels = decoded_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) prior_center = priors[:, :2] if isinstance(gt_bboxes, BaseBoxes): is_in_gts = gt_bboxes.find_inside_points(prior_center) else: # Tensor boxes will be treated as horizontal boxes by defaults lt_ = prior_center[:, None] - gt_bboxes[:, :2] rb_ = gt_bboxes[:, 2:] - prior_center[:, None] deltas = torch.cat([lt_, rb_], dim=-1) is_in_gts = deltas.min(dim=-1).values > 0 valid_mask = is_in_gts.sum(dim=1) > 0 valid_decoded_bbox = decoded_bboxes[valid_mask] valid_pred_scores = pred_scores[valid_mask] num_valid = valid_decoded_bbox.size(0) if num_valid == 0: # No ground truth or boxes, return empty assignment max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) assigned_labels = decoded_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) if hasattr(gt_instances, 'masks'): gt_center = center_of_mass(gt_instances.masks, eps=EPS) elif isinstance(gt_bboxes, BaseBoxes): gt_center = gt_bboxes.centers else: # Tensor boxes will be treated as horizontal boxes by defaults gt_center = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2.0 valid_prior = priors[valid_mask] strides = valid_prior[:, 2] distance = (valid_prior[:, None, :2] - gt_center[None, :, :] ).pow(2).sum(-1).sqrt() / strides[:, None] soft_center_prior = torch.pow(10, distance - self.soft_center_radius) pairwise_ious = self.iou_calculator(valid_decoded_bbox, gt_bboxes) iou_cost = -torch.log(pairwise_ious + EPS) * self.iou_weight gt_onehot_label = ( F.one_hot(gt_labels.to(torch.int64), pred_scores.shape[-1]).float().unsqueeze(0).repeat( num_valid, 1, 1)) valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1) soft_label = gt_onehot_label * pairwise_ious[..., None] scale_factor = soft_label - valid_pred_scores.sigmoid() soft_cls_cost = F.binary_cross_entropy_with_logits( valid_pred_scores, soft_label, reduction='none') * scale_factor.abs().pow(2.0) soft_cls_cost = soft_cls_cost.sum(dim=-1) cost_matrix = soft_cls_cost + iou_cost + soft_center_prior matched_pred_ious, matched_gt_inds = self.dynamic_k_matching( cost_matrix, pairwise_ious, num_gt, valid_mask) # convert to AssignResult format assigned_gt_inds[valid_mask] = matched_gt_inds + 1 assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), -INF, dtype=torch.float32) max_overlaps[valid_mask] = matched_pred_ious return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor, num_gt: int, valid_mask: Tensor) -> Tuple[Tensor, Tensor]: """Use IoU and matching cost to calculate the dynamic top-k positive targets. Same as SimOTA. Args: cost (Tensor): Cost matrix. 
            pairwise_ious (Tensor): Pairwise iou matrix.
            num_gt (int): Number of gt.
            valid_mask (Tensor): Mask for valid bboxes.

        Returns:
            tuple: matched ious and gt indexes.
        """
        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
        # select candidate topk ious for dynamic-k calculation
        candidate_topk = min(self.topk, pairwise_ious.size(0))
        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
        # calculate dynamic k for each gt
        dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
        for gt_idx in range(num_gt):
            _, pos_idx = torch.topk(
                cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False)
            matching_matrix[:, gt_idx][pos_idx] = 1

        del topk_ious, dynamic_ks, pos_idx

        prior_match_gt_mask = matching_matrix.sum(1) > 1
        if prior_match_gt_mask.sum() > 0:
            cost_min, cost_argmin = torch.min(
                cost[prior_match_gt_mask, :], dim=1)
            matching_matrix[prior_match_gt_mask, :] *= 0
            matching_matrix[prior_match_gt_mask, cost_argmin] = 1

        # get foreground mask inside box and center prior
        fg_mask_inboxes = matching_matrix.sum(1) > 0
        valid_mask[valid_mask.clone()] = fg_mask_inboxes

        matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)
        matched_pred_ious = (matching_matrix *
                             pairwise_ious).sum(1)[fg_mask_inboxes]
        return matched_pred_ious, matched_gt_inds
9,847
42.192982
79
py
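A minimal, self-contained PyTorch sketch of the soft classification cost computed in DynamicSoftLabelAssigner.assign above: the one-hot gt label is scaled by the pairwise IoU to form a soft target, and the BCE cost is re-weighted by the gap between that target and the predicted score. Tensor names and toy shapes below are illustrative assumptions, not part of the repository.

import torch
import torch.nn.functional as F

# toy sizes: 5 valid priors, 2 gts, 3 classes (illustrative only)
num_valid, num_gt, num_classes = 5, 2, 3
pred_logits = torch.randn(num_valid, num_classes)   # classification logits of the valid priors
gt_labels = torch.tensor([0, 2])                    # (num_gt, )
pairwise_ious = torch.rand(num_valid, num_gt)       # IoU between each valid prior and each gt

# broadcast one-hot gt labels to every valid prior: (num_valid, num_gt, num_classes)
gt_onehot = F.one_hot(gt_labels, num_classes).float().unsqueeze(0).repeat(num_valid, 1, 1)
logits = pred_logits.unsqueeze(1).repeat(1, num_gt, 1)

# soft target = one-hot label scaled by the prior-gt IoU
soft_label = gt_onehot * pairwise_ious[..., None]
scale_factor = (soft_label - logits.sigmoid()).abs().pow(2.0)

soft_cls_cost = (F.binary_cross_entropy_with_logits(
    logits, soft_label, reduction='none') * scale_factor).sum(-1)
print(soft_cls_cost.shape)  # torch.Size([5, 2]): one cost per (valid prior, gt) pair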
ERD
ERD-main/mmdet/models/task_modules/assigners/sim_ota_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple import torch import torch.nn.functional as F from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner INF = 100000.0 EPS = 1.0e-7 @TASK_UTILS.register_module() class SimOTAAssigner(BaseAssigner): """Computes matching between predictions and ground truth. Args: center_radius (float): Ground truth center size to judge whether a prior is in center. Defaults to 2.5. candidate_topk (int): The candidate top-k which used to get top-k ious to calculate dynamic-k. Defaults to 10. iou_weight (float): The scale factor for regression iou cost. Defaults to 3.0. cls_weight (float): The scale factor for classification cost. Defaults to 1.0. iou_calculator (ConfigType): Config of overlaps Calculator. Defaults to dict(type='BboxOverlaps2D'). """ def __init__(self, center_radius: float = 2.5, candidate_topk: int = 10, iou_weight: float = 3.0, cls_weight: float = 1.0, iou_calculator: ConfigType = dict(type='BboxOverlaps2D')): self.center_radius = center_radius self.candidate_topk = candidate_topk self.iou_weight = iou_weight self.cls_weight = cls_weight self.iou_calculator = TASK_UTILS.build(iou_calculator) def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to priors using SimOTA. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: obj:`AssignResult`: The assigned result. 
""" gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels num_gt = gt_bboxes.size(0) decoded_bboxes = pred_instances.bboxes pred_scores = pred_instances.scores priors = pred_instances.priors num_bboxes = decoded_bboxes.size(0) # assign 0 by default assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), 0, dtype=torch.long) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) assigned_labels = decoded_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info( priors, gt_bboxes) valid_decoded_bbox = decoded_bboxes[valid_mask] valid_pred_scores = pred_scores[valid_mask] num_valid = valid_decoded_bbox.size(0) if num_valid == 0: # No valid bboxes, return empty assignment max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) assigned_labels = decoded_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) pairwise_ious = self.iou_calculator(valid_decoded_bbox, gt_bboxes) iou_cost = -torch.log(pairwise_ious + EPS) gt_onehot_label = ( F.one_hot(gt_labels.to(torch.int64), pred_scores.shape[-1]).float().unsqueeze(0).repeat( num_valid, 1, 1)) valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1) # disable AMP autocast and calculate BCE with FP32 to avoid overflow with torch.cuda.amp.autocast(enabled=False): cls_cost = ( F.binary_cross_entropy( valid_pred_scores.to(dtype=torch.float32), gt_onehot_label, reduction='none', ).sum(-1).to(dtype=valid_pred_scores.dtype)) cost_matrix = ( cls_cost * self.cls_weight + iou_cost * self.iou_weight + (~is_in_boxes_and_center) * INF) matched_pred_ious, matched_gt_inds = \ self.dynamic_k_matching( cost_matrix, pairwise_ious, num_gt, valid_mask) # convert to AssignResult format assigned_gt_inds[valid_mask] = matched_gt_inds + 1 assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), -INF, dtype=torch.float32) max_overlaps[valid_mask] = matched_pred_ious return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) def get_in_gt_and_in_center_info( self, priors: Tensor, gt_bboxes: Tensor) -> Tuple[Tensor, Tensor]: """Get the information of which prior is in gt bboxes and gt center priors.""" num_gt = gt_bboxes.size(0) repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt) repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt) repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt) repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt) # is prior centers in gt bboxes, shape: [n_prior, n_gt] l_ = repeated_x - gt_bboxes[:, 0] t_ = repeated_y - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - repeated_x b_ = gt_bboxes[:, 3] - repeated_y deltas = torch.stack([l_, t_, r_, b_], dim=1) is_in_gts = deltas.min(dim=1).values > 0 is_in_gts_all = is_in_gts.sum(dim=1) > 0 # is prior centers in gt centers gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 ct_box_l = gt_cxs - self.center_radius * repeated_stride_x ct_box_t = gt_cys - self.center_radius * repeated_stride_y ct_box_r = gt_cxs + self.center_radius * repeated_stride_x ct_box_b = gt_cys + self.center_radius * repeated_stride_y cl_ = repeated_x - ct_box_l ct_ = 
repeated_y - ct_box_t cr_ = ct_box_r - repeated_x cb_ = ct_box_b - repeated_y ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1) is_in_cts = ct_deltas.min(dim=1).values > 0 is_in_cts_all = is_in_cts.sum(dim=1) > 0 # in boxes or in centers, shape: [num_priors] is_in_gts_or_centers = is_in_gts_all | is_in_cts_all # both in boxes and centers, shape: [num_fg, num_gt] is_in_boxes_and_centers = ( is_in_gts[is_in_gts_or_centers, :] & is_in_cts[is_in_gts_or_centers, :]) return is_in_gts_or_centers, is_in_boxes_and_centers def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor, num_gt: int, valid_mask: Tensor) -> Tuple[Tensor, Tensor]: """Use IoU and matching cost to calculate the dynamic top-k positive targets.""" matching_matrix = torch.zeros_like(cost, dtype=torch.uint8) # select candidate topk ious for dynamic-k calculation candidate_topk = min(self.candidate_topk, pairwise_ious.size(0)) topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) # calculate dynamic k for each gt dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) for gt_idx in range(num_gt): _, pos_idx = torch.topk( cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False) matching_matrix[:, gt_idx][pos_idx] = 1 del topk_ious, dynamic_ks, pos_idx prior_match_gt_mask = matching_matrix.sum(1) > 1 if prior_match_gt_mask.sum() > 0: cost_min, cost_argmin = torch.min( cost[prior_match_gt_mask, :], dim=1) matching_matrix[prior_match_gt_mask, :] *= 0 matching_matrix[prior_match_gt_mask, cost_argmin] = 1 # get foreground mask inside box and center prior fg_mask_inboxes = matching_matrix.sum(1) > 0 valid_mask[valid_mask.clone()] = fg_mask_inboxes matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) matched_pred_ious = (matching_matrix * pairwise_ious).sum(1)[fg_mask_inboxes] return matched_pred_ious, matched_gt_inds
9,943
43.392857
79
py
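The dynamic_k_matching step shared by SimOTAAssigner above (and the dynamic soft label assigner earlier) can be illustrated with a small standalone sketch: the per-gt k is the clamped integer sum of the top candidate IoUs, the k lowest-cost priors are taken per gt, and a prior matched to several gts keeps only its cheapest match. Shapes and values are toy assumptions.

import torch

num_priors, num_gt, candidate_topk = 8, 3, 4   # toy sizes
cost = torch.rand(num_priors, num_gt)          # matching cost, lower is better
ious = torch.rand(num_priors, num_gt)          # pairwise IoUs

matching = torch.zeros_like(cost, dtype=torch.uint8)
topk_ious, _ = torch.topk(ious, min(candidate_topk, num_priors), dim=0)
dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)     # one k per gt

for gt_idx in range(num_gt):
    # the dynamic_ks[gt_idx] cheapest priors become positives for this gt
    _, pos_idx = torch.topk(
        cost[:, gt_idx], k=int(dynamic_ks[gt_idx]), largest=False)
    matching[pos_idx, gt_idx] = 1

# a prior assigned to several gts keeps only the match with the lowest cost
multi = matching.sum(1) > 1
if multi.any():
    _, argmin = cost[multi, :].min(dim=1)
    matching[multi, :] = 0
    matching[multi, argmin] = 1

print(matching.sum(0))  # number of positive priors finally assigned to each gt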
ERD
ERD-main/mmdet/models/task_modules/assigners/atss_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from typing import List, Optional import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner def bbox_center_distance(bboxes: Tensor, priors: Tensor) -> Tensor: """Compute the center distance between bboxes and priors. Args: bboxes (Tensor): Shape (n, 4) for , "xyxy" format. priors (Tensor): Shape (n, 4) for priors, "xyxy" format. Returns: Tensor: Center distances between bboxes and priors. """ bbox_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 bbox_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 bbox_points = torch.stack((bbox_cx, bbox_cy), dim=1) priors_cx = (priors[:, 0] + priors[:, 2]) / 2.0 priors_cy = (priors[:, 1] + priors[:, 3]) / 2.0 priors_points = torch.stack((priors_cx, priors_cy), dim=1) distances = (priors_points[:, None, :] - bbox_points[None, :, :]).pow(2).sum(-1).sqrt() return distances @TASK_UTILS.register_module() class ATSSAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each prior. Each proposals will be assigned with `0` or a positive integer indicating the ground truth index. - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt If ``alpha`` is not None, it means that the dynamic cost ATSSAssigner is adopted, which is currently only used in the DDOD. Args: topk (int): number of priors selected in each level alpha (float, optional): param of cost rate for each proposal only in DDOD. Defaults to None. iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou calculator. Defaults to ``dict(type='BboxOverlaps2D')`` ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. Defaults to -1. """ def __init__(self, topk: int, alpha: Optional[float] = None, iou_calculator: ConfigType = dict(type='BboxOverlaps2D'), ignore_iof_thr: float = -1) -> None: self.topk = topk self.alpha = alpha self.iou_calculator = TASK_UTILS.build(iou_calculator) self.ignore_iof_thr = ignore_iof_thr # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py def assign( self, pred_instances: InstanceData, num_level_priors: List[int], gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None ) -> AssignResult: """Assign gt to priors. The assignment is done in following steps 1. compute iou between all prior (prior of all pyramid levels) and gt 2. compute center distance between all prior and gt 3. on each pyramid level, for each gt, select k prior whose center are closest to the gt center, so we total select k*l prior as candidates for each gt 4. get corresponding iou for the these candidates, and compute the mean and std, set mean + std as the iou threshold 5. select these candidates whose iou are greater than or equal to the threshold as positive 6. limit the positive sample's center in gt If ``alpha`` is not None, and ``cls_scores`` and `bbox_preds` are not None, the overlaps calculation in the first step will also include dynamic cost, which is currently only used in the DDOD. Args: pred_instances (:obj:`InstaceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors, points, or bboxes predicted by the model, shape(n, 4). 
num_level_priors (List): Number of bboxes in each level gt_instances (:obj:`InstaceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. gt_instances_ignore (:obj:`InstaceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assign result. """ gt_bboxes = gt_instances.bboxes priors = pred_instances.priors gt_labels = gt_instances.labels if gt_instances_ignore is not None: gt_bboxes_ignore = gt_instances_ignore.bboxes else: gt_bboxes_ignore = None INF = 100000000 priors = priors[:, :4] num_gt, num_priors = gt_bboxes.size(0), priors.size(0) message = 'Invalid alpha parameter because cls_scores or ' \ 'bbox_preds are None. If you want to use the ' \ 'cost-based ATSSAssigner, please set cls_scores, ' \ 'bbox_preds and self.alpha at the same time. ' # compute iou between all bbox and gt if self.alpha is None: # ATSSAssigner overlaps = self.iou_calculator(priors, gt_bboxes) if ('scores' in pred_instances or 'bboxes' in pred_instances): warnings.warn(message) else: # Dynamic cost ATSSAssigner in DDOD assert ('scores' in pred_instances and 'bboxes' in pred_instances), message cls_scores = pred_instances.scores bbox_preds = pred_instances.bboxes # compute cls cost for bbox and GT cls_cost = torch.sigmoid(cls_scores[:, gt_labels]) # compute iou between all bbox and gt overlaps = self.iou_calculator(bbox_preds, gt_bboxes) # make sure that we are in element-wise multiplication assert cls_cost.shape == overlaps.shape # overlaps is actually a cost matrix overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha # assign 0 by default assigned_gt_inds = overlaps.new_full((num_priors, ), 0, dtype=torch.long) if num_gt == 0 or num_priors == 0: # No ground truth or boxes, return empty assignment max_overlaps = overlaps.new_zeros((num_priors, )) if num_gt == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 assigned_labels = overlaps.new_full((num_priors, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) # compute center distance between all bbox and gt distances = bbox_center_distance(gt_bboxes, priors) if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and priors.numel() > 0): ignore_overlaps = self.iou_calculator( priors, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr distances[ignore_idxs, :] = INF assigned_gt_inds[ignore_idxs] = -1 # Selecting candidates based on the center distance candidate_idxs = [] start_idx = 0 for level, priors_per_level in enumerate(num_level_priors): # on each pyramid level, for each gt, # select k bbox whose center are closest to the gt center end_idx = start_idx + priors_per_level distances_per_level = distances[start_idx:end_idx, :] selectable_k = min(self.topk, priors_per_level) _, topk_idxs_per_level = distances_per_level.topk( selectable_k, dim=0, largest=False) candidate_idxs.append(topk_idxs_per_level + start_idx) start_idx = end_idx candidate_idxs = torch.cat(candidate_idxs, dim=0) # get corresponding iou for the these candidates, and compute the # mean and std, set mean + std as the iou threshold candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] overlaps_mean_per_gt = candidate_overlaps.mean(0) overlaps_std_per_gt = candidate_overlaps.std(0) 
overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] # limit the positive sample's center in gt for gt_idx in range(num_gt): candidate_idxs[:, gt_idx] += gt_idx * num_priors priors_cx = (priors[:, 0] + priors[:, 2]) / 2.0 priors_cy = (priors[:, 1] + priors[:, 3]) / 2.0 ep_priors_cx = priors_cx.view(1, -1).expand( num_gt, num_priors).contiguous().view(-1) ep_priors_cy = priors_cy.view(1, -1).expand( num_gt, num_priors).contiguous().view(-1) candidate_idxs = candidate_idxs.view(-1) # calculate the left, top, right, bottom distance between positive # prior center and gt side l_ = ep_priors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] t_ = ep_priors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - ep_priors_cx[candidate_idxs].view(-1, num_gt) b_ = gt_bboxes[:, 3] - ep_priors_cy[candidate_idxs].view(-1, num_gt) is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 is_pos = is_pos & is_in_gts # if an anchor box is assigned to multiple gts, # the one with the highest IoU will be selected. overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) index = candidate_idxs.view(-1)[is_pos.view(-1)] overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] overlaps_inf = overlaps_inf.view(num_gt, -1).t() max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) assigned_gt_inds[ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 assigned_labels = assigned_gt_inds.new_full((num_priors, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1] return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
11,003
42.152941
87
py
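A toy illustration of steps 4-5 of the ATSSAssigner procedure documented above: after collecting the closest candidate anchors for each gt, the IoU threshold is the per-gt mean plus standard deviation of the candidate IoUs. The numbers below are made up purely to show the mechanics.

import torch

# IoUs of the top-k closest candidate anchors for two gts: (num_candidates, num_gt)
candidate_overlaps = torch.tensor([[0.55, 0.10],
                                   [0.48, 0.30],
                                   [0.62, 0.05],
                                   [0.20, 0.40]])

overlaps_mean_per_gt = candidate_overlaps.mean(0)
overlaps_std_per_gt = candidate_overlaps.std(0)
overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt   # adaptive per-gt threshold

is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
print(overlaps_thr_per_gt)  # one IoU threshold per gt
print(is_pos)               # candidates kept as positives (before the center-in-gt check)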
ERD
ERD-main/mmdet/models/task_modules/assigners/multi_instance_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional import torch from mmengine.structures import InstanceData from mmdet.registry import TASK_UTILS from .assign_result import AssignResult from .max_iou_assigner import MaxIoUAssigner @TASK_UTILS.register_module() class MultiInstanceAssigner(MaxIoUAssigner): """Assign a corresponding gt bbox or background to each proposal bbox. If we need to use a proposal box to generate multiple predict boxes, `MultiInstanceAssigner` can assign multiple gt to each proposal box. Args: num_instance (int): How many bboxes are predicted by each proposal box. """ def __init__(self, num_instance: int = 2, **kwargs): super().__init__(**kwargs) self.num_instance = num_instance def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to bboxes. This method assign gt bboxes to every bbox (proposal/anchor), each bbox is assigned a set of gts, and the number of gts in this set is defined by `self.num_instance`. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assign result. 
""" gt_bboxes = gt_instances.bboxes priors = pred_instances.priors # Set the FG label to 1 and add ignored annotations gt_labels = gt_instances.labels + 1 if gt_instances_ignore is not None: gt_bboxes_ignore = gt_instances_ignore.bboxes if hasattr(gt_instances_ignore, 'labels'): gt_labels_ignore = gt_instances_ignore.labels else: gt_labels_ignore = torch.ones_like(gt_bboxes_ignore)[:, 0] * -1 else: gt_bboxes_ignore = None gt_labels_ignore = None assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( gt_bboxes.shape[0] > self.gpu_assign_thr) else False # compute overlap and assign gt on CPU when number of GT is large if assign_on_cpu: device = priors.device priors = priors.cpu() gt_bboxes = gt_bboxes.cpu() gt_labels = gt_labels.cpu() if gt_bboxes_ignore is not None: gt_bboxes_ignore = gt_bboxes_ignore.cpu() gt_labels_ignore = gt_labels_ignore.cpu() if gt_bboxes_ignore is not None: all_bboxes = torch.cat([gt_bboxes, gt_bboxes_ignore], dim=0) all_labels = torch.cat([gt_labels, gt_labels_ignore], dim=0) else: all_bboxes = gt_bboxes all_labels = gt_labels all_priors = torch.cat([priors, all_bboxes], dim=0) overlaps_normal = self.iou_calculator( all_priors, all_bboxes, mode='iou') overlaps_ignore = self.iou_calculator( all_priors, all_bboxes, mode='iof') gt_ignore_mask = all_labels.eq(-1).repeat(all_priors.shape[0], 1) overlaps_normal = overlaps_normal * ~gt_ignore_mask overlaps_ignore = overlaps_ignore * gt_ignore_mask overlaps_normal, overlaps_normal_indices = overlaps_normal.sort( descending=True, dim=1) overlaps_ignore, overlaps_ignore_indices = overlaps_ignore.sort( descending=True, dim=1) # select the roi with the higher score max_overlaps_normal = overlaps_normal[:, :self.num_instance].flatten() gt_assignment_normal = overlaps_normal_indices[:, :self. num_instance].flatten() max_overlaps_ignore = overlaps_ignore[:, :self.num_instance].flatten() gt_assignment_ignore = overlaps_ignore_indices[:, :self. num_instance].flatten() # ignore or not ignore_assign_mask = (max_overlaps_normal < self.pos_iou_thr) * ( max_overlaps_ignore > max_overlaps_normal) overlaps = (max_overlaps_normal * ~ignore_assign_mask) + ( max_overlaps_ignore * ignore_assign_mask) gt_assignment = (gt_assignment_normal * ~ignore_assign_mask) + ( gt_assignment_ignore * ignore_assign_mask) assigned_labels = all_labels[gt_assignment] fg_mask = (overlaps >= self.pos_iou_thr) * (assigned_labels != -1) bg_mask = (overlaps < self.neg_iou_thr) * (overlaps >= 0) assigned_labels[fg_mask] = 1 assigned_labels[bg_mask] = 0 overlaps = overlaps.reshape(-1, self.num_instance) gt_assignment = gt_assignment.reshape(-1, self.num_instance) assigned_labels = assigned_labels.reshape(-1, self.num_instance) assign_result = AssignResult( num_gts=all_bboxes.size(0), gt_inds=gt_assignment, max_overlaps=overlaps, labels=assigned_labels) if assign_on_cpu: assign_result.gt_inds = assign_result.gt_inds.to(device) assign_result.max_overlaps = assign_result.max_overlaps.to(device) if assign_result.labels is not None: assign_result.labels = assign_result.labels.to(device) return assign_result
6,188
42.893617
79
py
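A small sketch of the core idea behind MultiInstanceAssigner above: instead of keeping only the single best-overlapping gt, each proposal keeps its top num_instance gts after sorting the overlap matrix row-wise. Values are toy assumptions.

import torch

num_instance = 2
# IoU between 3 proposals and 4 gt boxes (toy values)
overlaps = torch.tensor([[0.7, 0.2, 0.6, 0.1],
                         [0.1, 0.5, 0.4, 0.3],
                         [0.0, 0.1, 0.2, 0.9]])

sorted_overlaps, sorted_inds = overlaps.sort(dim=1, descending=True)
top_overlaps = sorted_overlaps[:, :num_instance]   # best IoUs per proposal, shape (3, 2)
top_gt_inds = sorted_inds[:, :num_instance]        # the corresponding gt indices, shape (3, 2)
print(top_gt_inds)  # tensor([[0, 2], [1, 2], [3, 2]]): two gts kept per proposal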
ERD
ERD-main/mmdet/models/task_modules/assigners/center_region_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner def scale_boxes(bboxes: Tensor, scale: float) -> Tensor: """Expand an array of boxes by a given scale. Args: bboxes (Tensor): Shape (m, 4) scale (float): The scale factor of bboxes Returns: Tensor: Shape (m, 4). Scaled bboxes """ assert bboxes.size(1) == 4 w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5 h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5 x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5 y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5 w_half *= scale h_half *= scale boxes_scaled = torch.zeros_like(bboxes) boxes_scaled[:, 0] = x_c - w_half boxes_scaled[:, 2] = x_c + w_half boxes_scaled[:, 1] = y_c - h_half boxes_scaled[:, 3] = y_c + h_half return boxes_scaled def is_located_in(points: Tensor, bboxes: Tensor) -> Tensor: """Are points located in bboxes. Args: points (Tensor): Points, shape: (m, 2). bboxes (Tensor): Bounding boxes, shape: (n, 4). Return: Tensor: Flags indicating if points are located in bboxes, shape: (m, n). """ assert points.size(1) == 2 assert bboxes.size(1) == 4 return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \ (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \ (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \ (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0)) def bboxes_area(bboxes: Tensor) -> Tensor: """Compute the area of an array of bboxes. Args: bboxes (Tensor): The coordinates ox bboxes. Shape: (m, 4) Returns: Tensor: Area of the bboxes. Shape: (m, ) """ assert bboxes.size(1) == 4 w = (bboxes[:, 2] - bboxes[:, 0]) h = (bboxes[:, 3] - bboxes[:, 1]) areas = w * h return areas @TASK_UTILS.register_module() class CenterRegionAssigner(BaseAssigner): """Assign pixels at the center region of a bbox as positive. Each proposals will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: negative samples - semi-positive numbers: positive sample, index (0-based) of assigned gt Args: pos_scale (float): Threshold within which pixels are labelled as positive. neg_scale (float): Threshold above which pixels are labelled as positive. min_pos_iof (float): Minimum iof of a pixel with a gt to be labelled as positive. Default: 1e-2 ignore_gt_scale (float): Threshold within which the pixels are ignored when the gt is labelled as shadowed. Default: 0.5 foreground_dominate (bool): If True, the bbox will be assigned as positive when a gt's kernel region overlaps with another's shadowed (ignored) region, otherwise it is set as ignored. Default to False. iou_calculator (:obj:`ConfigDict` or dict): Config of overlaps Calculator. """ def __init__( self, pos_scale: float, neg_scale: float, min_pos_iof: float = 1e-2, ignore_gt_scale: float = 0.5, foreground_dominate: bool = False, iou_calculator: ConfigType = dict(type='BboxOverlaps2D') ) -> None: self.pos_scale = pos_scale self.neg_scale = neg_scale self.min_pos_iof = min_pos_iof self.ignore_gt_scale = ignore_gt_scale self.foreground_dominate = foreground_dominate self.iou_calculator = TASK_UTILS.build(iou_calculator) def get_gt_priorities(self, gt_bboxes: Tensor) -> Tensor: """Get gt priorities according to their areas. Smaller gt has higher priority. Args: gt_bboxes (Tensor): Ground truth boxes, shape (k, 4). 
Returns: Tensor: The priority of gts so that gts with larger priority is more likely to be assigned. Shape (k, ) """ gt_areas = bboxes_area(gt_bboxes) # Rank all gt bbox areas. Smaller objects has larger priority _, sort_idx = gt_areas.sort(descending=True) sort_idx = sort_idx.argsort() return sort_idx def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to bboxes. This method assigns gts to every prior (proposal/anchor), each prior will be assigned with -1, or a semi-positive number. -1 means negative sample, semi-positive number is the index (0-based) of assigned gt. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assigned result. Note that shadowed_labels of shape (N, 2) is also added as an `assign_result` attribute. `shadowed_labels` is a tensor composed of N pairs of anchor_ind, class_label], where N is the number of anchors that lie in the outer region of a gt, anchor_ind is the shadowed anchor index and class_label is the shadowed class label. Example: >>> from mmengine.structures import InstanceData >>> self = CenterRegionAssigner(0.2, 0.2) >>> pred_instances.priors = torch.Tensor([[0, 0, 10, 10], ... [10, 10, 20, 20]]) >>> gt_instances = InstanceData() >>> gt_instances.bboxes = torch.Tensor([[0, 0, 10, 10]]) >>> gt_instances.labels = torch.Tensor([0]) >>> assign_result = self.assign(pred_instances, gt_instances) >>> expected_gt_inds = torch.LongTensor([1, 0]) >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) """ # There are in total 5 steps in the pixel assignment # 1. Find core (the center region, say inner 0.2) # and shadow (the relatively ourter part, say inner 0.2-0.5) # regions of every gt. # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in # the image. # 3.1. For overlapping objects, the prior bboxes in gt_core is # assigned with the object with smallest area # 4. Assign prior bboxes with class label according to its gt id. # 4.1. Assign -1 to prior bboxes lying in shadowed gts # 4.2. Assign positive prior boxes with the corresponding label # 5. Find pixels lying in the shadow of an object and assign them with # background label, but set the loss weight of its corresponding # gt to zero. # TODO not extract bboxes in assign. gt_bboxes = gt_instances.bboxes priors = pred_instances.priors gt_labels = gt_instances.labels assert priors.size(1) == 4, 'priors must have size of 4' # 1. Find core positive and shadow region of every gt gt_core = scale_boxes(gt_bboxes, self.pos_scale) gt_shadow = scale_boxes(gt_bboxes, self.neg_scale) # 2. 
Find prior bboxes that lie in gt_core and gt_shadow regions prior_centers = (priors[:, 2:4] + priors[:, 0:2]) / 2 # The center points lie within the gt boxes is_prior_in_gt = is_located_in(prior_centers, gt_bboxes) # Only calculate prior and gt_core IoF. This enables small prior bboxes # to match large gts prior_and_gt_core_overlaps = self.iou_calculator( priors, gt_core, mode='iof') # The center point of effective priors should be within the gt box is_prior_in_gt_core = is_prior_in_gt & ( prior_and_gt_core_overlaps > self.min_pos_iof) # shape (n, k) is_prior_in_gt_shadow = ( self.iou_calculator(priors, gt_shadow, mode='iof') > self.min_pos_iof) # Rule out center effective positive pixels is_prior_in_gt_shadow &= (~is_prior_in_gt_core) num_gts, num_priors = gt_bboxes.size(0), priors.size(0) if num_gts == 0 or num_priors == 0: # If no gts exist, assign all pixels to negative assigned_gt_ids = \ is_prior_in_gt_core.new_zeros((num_priors,), dtype=torch.long) pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2)) else: # Step 3: assign a one-hot gt id to each pixel, and smaller objects # have high priority to assign the pixel. sort_idx = self.get_gt_priorities(gt_bboxes) assigned_gt_ids, pixels_in_gt_shadow = \ self.assign_one_hot_gt_indices(is_prior_in_gt_core, is_prior_in_gt_shadow, gt_priority=sort_idx) if (gt_instances_ignore is not None and gt_instances_ignore.bboxes.numel() > 0): # No ground truth or boxes, return empty assignment gt_bboxes_ignore = gt_instances_ignore.bboxes gt_bboxes_ignore = scale_boxes( gt_bboxes_ignore, scale=self.ignore_gt_scale) is_prior_in_ignored_gts = is_located_in(prior_centers, gt_bboxes_ignore) is_prior_in_ignored_gts = is_prior_in_ignored_gts.any(dim=1) assigned_gt_ids[is_prior_in_ignored_gts] = -1 # 4. Assign prior bboxes with class label according to its gt id. # Default assigned label is the background (-1) assigned_labels = assigned_gt_ids.new_full((num_priors, ), -1) pos_inds = torch.nonzero(assigned_gt_ids > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds] - 1] # 5. Find pixels lying in the shadow of an object shadowed_pixel_labels = pixels_in_gt_shadow.clone() if pixels_in_gt_shadow.numel() > 0: pixel_idx, gt_idx =\ pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1] assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \ 'Some pixels are dually assigned to ignore and gt!' shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1] override = ( assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1]) if self.foreground_dominate: # When a pixel is both positive and shadowed, set it as pos shadowed_pixel_labels = shadowed_pixel_labels[~override] else: # When a pixel is both pos and shadowed, set it as shadowed assigned_labels[pixel_idx[override]] = -1 assigned_gt_ids[pixel_idx[override]] = 0 assign_result = AssignResult( num_gts, assigned_gt_ids, None, labels=assigned_labels) # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2) assign_result.set_extra_property('shadowed_labels', shadowed_pixel_labels) return assign_result def assign_one_hot_gt_indices( self, is_prior_in_gt_core: Tensor, is_prior_in_gt_shadow: Tensor, gt_priority: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: """Assign only one gt index to each prior box. Gts with large gt_priority are more likely to be assigned. Args: is_prior_in_gt_core (Tensor): Bool tensor indicating the prior center is in the core area of a gt (e.g. 0-0.2). Shape: (num_prior, num_gt). 
is_prior_in_gt_shadow (Tensor): Bool tensor indicating the prior center is in the shadowed area of a gt (e.g. 0.2-0.5). Shape: (num_prior, num_gt). gt_priority (Tensor): Priorities of gts. The gt with a higher priority is more likely to be assigned to the bbox when the bbox match with multiple gts. Shape: (num_gt, ). Returns: tuple: Returns (assigned_gt_inds, shadowed_gt_inds). - assigned_gt_inds: The assigned gt index of each prior bbox \ (i.e. index from 1 to num_gts). Shape: (num_prior, ). - shadowed_gt_inds: shadowed gt indices. It is a tensor of \ shape (num_ignore, 2) with first column being the shadowed prior \ bbox indices and the second column the shadowed gt \ indices (1-based). """ num_bboxes, num_gts = is_prior_in_gt_core.shape if gt_priority is None: gt_priority = torch.arange( num_gts, device=is_prior_in_gt_core.device) assert gt_priority.size(0) == num_gts # The bigger gt_priority, the more preferable to be assigned # The assigned inds are by default 0 (background) assigned_gt_inds = is_prior_in_gt_core.new_zeros((num_bboxes, ), dtype=torch.long) # Shadowed bboxes are assigned to be background. But the corresponding # label is ignored during loss calculation, which is done through # shadowed_gt_inds shadowed_gt_inds = torch.nonzero(is_prior_in_gt_shadow, as_tuple=False) if is_prior_in_gt_core.sum() == 0: # No gt match shadowed_gt_inds[:, 1] += 1 # 1-based. For consistency issue return assigned_gt_inds, shadowed_gt_inds # The priority of each prior box and gt pair. If one prior box is # matched bo multiple gts. Only the pair with the highest priority # is saved pair_priority = is_prior_in_gt_core.new_full((num_bboxes, num_gts), -1, dtype=torch.long) # Each bbox could match with multiple gts. # The following codes deal with this situation # Matched bboxes (to any gt). Shape: (num_pos_anchor, ) inds_of_match = torch.any(is_prior_in_gt_core, dim=1) # The matched gt index of each positive bbox. Length >= num_pos_anchor # , since one bbox could match multiple gts matched_bbox_gt_inds = torch.nonzero( is_prior_in_gt_core, as_tuple=False)[:, 1] # Assign priority to each bbox-gt pair. pair_priority[is_prior_in_gt_core] = gt_priority[matched_bbox_gt_inds] _, argmax_priority = pair_priority[inds_of_match].max(dim=1) assigned_gt_inds[inds_of_match] = argmax_priority + 1 # 1-based # Zero-out the assigned anchor box to filter the shadowed gt indices is_prior_in_gt_core[inds_of_match, argmax_priority] = 0 # Concat the shadowed indices due to overlapping with that out side of # effective scale. shape: (total_num_ignore, 2) shadowed_gt_inds = torch.cat( (shadowed_gt_inds, torch.nonzero(is_prior_in_gt_core, as_tuple=False)), dim=0) # Change `is_prior_in_gt_core` back to keep arguments intact. is_prior_in_gt_core[inds_of_match, argmax_priority] = 1 # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds` if shadowed_gt_inds.numel() > 0: shadowed_gt_inds[:, 1] += 1 return assigned_gt_inds, shadowed_gt_inds
16,747
44.634877
79
py
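A self-contained sketch of the geometric primitives CenterRegionAssigner relies on: scale_boxes shrinks a gt box around its center to obtain the core (positive) region, and a containment test then marks the prior centers that fall inside it. The helper and toy boxes below are illustrative re-implementations, not imports from the repo.

import torch

def scale_boxes(bboxes, scale):
    # shrink (scale < 1) or expand (scale > 1) xyxy boxes around their centers
    w_half = (bboxes[:, 2] - bboxes[:, 0]) * 0.5 * scale
    h_half = (bboxes[:, 3] - bboxes[:, 1]) * 0.5 * scale
    x_c = (bboxes[:, 2] + bboxes[:, 0]) * 0.5
    y_c = (bboxes[:, 3] + bboxes[:, 1]) * 0.5
    return torch.stack([x_c - w_half, y_c - h_half, x_c + w_half, y_c + h_half], dim=1)

gt_bboxes = torch.tensor([[0., 0., 100., 100.]])
gt_core = scale_boxes(gt_bboxes, 0.2)            # central region: [[40., 40., 60., 60.]]

prior_centers = torch.tensor([[50., 50.], [10., 10.]])
in_core = (prior_centers[:, 0:1] > gt_core[:, 0]) & (prior_centers[:, 0:1] < gt_core[:, 2]) & \
          (prior_centers[:, 1:2] > gt_core[:, 1]) & (prior_centers[:, 1:2] < gt_core[:, 3])
print(in_core.squeeze(1))  # tensor([ True, False]): only the first prior is a core positive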
ERD
ERD-main/mmdet/models/task_modules/assigners/region_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from ..prior_generators import anchor_inside_flags from .assign_result import AssignResult from .base_assigner import BaseAssigner def calc_region( bbox: Tensor, ratio: float, stride: int, featmap_size: Optional[Tuple[int, int]] = None) -> Tuple[Tensor]: """Calculate region of the box defined by the ratio, the ratio is from the center of the box to every edge.""" # project bbox on the feature f_bbox = bbox / stride x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2]) y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3]) x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2]) y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3]) if featmap_size is not None: x1 = x1.clamp(min=0, max=featmap_size[1]) y1 = y1.clamp(min=0, max=featmap_size[0]) x2 = x2.clamp(min=0, max=featmap_size[1]) y2 = y2.clamp(min=0, max=featmap_size[0]) return (x1, y1, x2, y2) def anchor_ctr_inside_region_flags(anchors: Tensor, stride: int, region: Tuple[Tensor]) -> Tensor: """Get the flag indicate whether anchor centers are inside regions.""" x1, y1, x2, y2 = region f_anchors = anchors / stride x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5 y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5 flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2) return flags @TASK_UTILS.register_module() class RegionAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: don't care - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: center_ratio (float): ratio of the region in the center of the bbox to define positive sample. ignore_ratio (float): ratio of the region to define ignore samples. """ def __init__(self, center_ratio: float = 0.2, ignore_ratio: float = 0.5) -> None: self.center_ratio = center_ratio self.ignore_ratio = ignore_ratio def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: dict, featmap_sizes: List[Tuple[int, int]], num_level_anchors: List[int], anchor_scale: int, anchor_strides: List[int], gt_instances_ignore: Optional[InstanceData] = None, allowed_border: int = 0) -> AssignResult: """Assign gt to anchors. This method assign a gt bbox to every bbox (proposal/anchor), each bbox will be assigned with -1, 0, or a positive number. -1 means don't care, 0 means negative sample, positive number is the index (1-based) of assigned gt. The assignment is done in following steps, and the order matters. 1. Assign every anchor to 0 (negative) 2. (For each gt_bboxes) Compute ignore flags based on ignore_region then assign -1 to anchors w.r.t. ignore flags 3. (For each gt_bboxes) Compute pos flags based on center_region then assign gt_bboxes to anchors w.r.t. pos flags 4. (For each gt_bboxes) Compute ignore flags based on adjacent anchor level then assign -1 to anchors w.r.t. ignore flags 5. Assign anchor outside of image to -1 Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). 
The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). img_meta (dict): Meta info of image. featmap_sizes (list[tuple[int, int]]): Feature map size each level. num_level_anchors (list[int]): The number of anchors in each level. anchor_scale (int): Scale of the anchor. anchor_strides (list[int]): Stride of the anchor. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. allowed_border (int, optional): The border to allow the valid anchor. Defaults to 0. Returns: :obj:`AssignResult`: The assign result. """ if gt_instances_ignore is not None: raise NotImplementedError num_gts = len(gt_instances) num_bboxes = len(pred_instances) gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels flat_anchors = pred_instances.priors flat_valid_flags = pred_instances.valid_flags mlvl_anchors = torch.split(flat_anchors, num_level_anchors) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = gt_bboxes.new_zeros((num_bboxes, )) assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ), dtype=torch.long) assigned_labels = gt_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gts=num_gts, gt_inds=assigned_gt_inds, max_overlaps=max_overlaps, labels=assigned_labels) num_lvls = len(mlvl_anchors) r1 = (1 - self.center_ratio) / 2 r2 = (1 - self.ignore_ratio) / 2 scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() # 1. assign 0 (negative) by default mlvl_assigned_gt_inds = [] mlvl_ignore_flags = [] for lvl in range(num_lvls): assigned_gt_inds = gt_bboxes.new_full((num_level_anchors[lvl], ), 0, dtype=torch.long) ignore_flags = torch.zeros_like(assigned_gt_inds) mlvl_assigned_gt_inds.append(assigned_gt_inds) mlvl_ignore_flags.append(ignore_flags) for gt_id in range(num_gts): lvl = target_lvls[gt_id].item() featmap_size = featmap_sizes[lvl] stride = anchor_strides[lvl] anchors = mlvl_anchors[lvl] gt_bbox = gt_bboxes[gt_id, :4] # Compute regions ignore_region = calc_region(gt_bbox, r2, stride, featmap_size) ctr_region = calc_region(gt_bbox, r1, stride, featmap_size) # 2. Assign -1 to ignore flags ignore_flags = anchor_ctr_inside_region_flags( anchors, stride, ignore_region) mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 # 3. Assign gt_bboxes to pos flags pos_flags = anchor_ctr_inside_region_flags(anchors, stride, ctr_region) mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1 # 4. 
Assign -1 to ignore adjacent lvl if lvl > 0: d_lvl = lvl - 1 d_anchors = mlvl_anchors[d_lvl] d_featmap_size = featmap_sizes[d_lvl] d_stride = anchor_strides[d_lvl] d_ignore_region = calc_region(gt_bbox, r2, d_stride, d_featmap_size) ignore_flags = anchor_ctr_inside_region_flags( d_anchors, d_stride, d_ignore_region) mlvl_ignore_flags[d_lvl][ignore_flags] = 1 if lvl < num_lvls - 1: u_lvl = lvl + 1 u_anchors = mlvl_anchors[u_lvl] u_featmap_size = featmap_sizes[u_lvl] u_stride = anchor_strides[u_lvl] u_ignore_region = calc_region(gt_bbox, r2, u_stride, u_featmap_size) ignore_flags = anchor_ctr_inside_region_flags( u_anchors, u_stride, u_ignore_region) mlvl_ignore_flags[u_lvl][ignore_flags] = 1 # 4. (cont.) Assign -1 to ignore adjacent lvl for lvl in range(num_lvls): ignore_flags = mlvl_ignore_flags[lvl] mlvl_assigned_gt_inds[lvl][ignore_flags == 1] = -1 # 5. Assign -1 to anchor outside of image flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds) assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] == flat_valid_flags.shape[0]) inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags, img_meta['img_shape'], allowed_border) outside_flags = ~inside_flags flat_assigned_gt_inds[outside_flags] = -1 assigned_labels = torch.zeros_like(flat_assigned_gt_inds) pos_flags = flat_assigned_gt_inds > 0 assigned_labels[pos_flags] = gt_labels[flat_assigned_gt_inds[pos_flags] - 1] return AssignResult( num_gts=num_gts, gt_inds=flat_assigned_gt_inds, max_overlaps=None, labels=assigned_labels)
10,474
42.645833
79
py
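A standalone sketch of calc_region as used by RegionAssigner above: a gt box is projected onto a feature level by its stride and each edge is moved towards the center by the ratio r1 = (1 - center_ratio) / 2 to obtain the positive region. Box and stride values below are toy assumptions.

import torch

def calc_region(bbox, ratio, stride):
    # project the box onto the feature map and move each edge towards the center
    # (the featmap-size clamping done in the assigner is omitted here)
    f_bbox = bbox / stride
    x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])
    y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])
    x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])
    y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])
    return x1, y1, x2, y2

gt_bbox = torch.tensor([32., 32., 96., 96.])
center_ratio, stride = 0.2, 8
r1 = (1 - center_ratio) / 2                      # 0.4, as in RegionAssigner.assign
print(calc_region(gt_bbox, r1, stride))          # the small positive region in feature-map coords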
ERD
ERD-main/mmdet/models/task_modules/assigners/grid_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple, Union import torch from mmengine.structures import InstanceData from mmdet.registry import TASK_UTILS from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner @TASK_UTILS.register_module() class GridAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: don't care - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple[float, float]): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). Defaults to 0. gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. iou_calculator (:obj:`ConfigDict` or dict): Config of overlaps Calculator. """ def __init__( self, pos_iou_thr: float, neg_iou_thr: Union[float, Tuple[float, float]], min_pos_iou: float = .0, gt_max_assign_all: bool = True, iou_calculator: ConfigType = dict(type='BboxOverlaps2D') ) -> None: self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.iou_calculator = TASK_UTILS.build(iou_calculator) def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to bboxes. The process is very much like the max iou assigner, except that positive samples are constrained within the cell that the gt boxes fell in. This method assign a gt bbox to every bbox (proposal/anchor), each bbox will be assigned with -1, 0, or a positive number. -1 means don't care, 0 means negative sample, positive number is the index (1-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to -1 2. assign proposals whose iou with all gts <= neg_iou_thr to 0 3. for each bbox within a cell, if the iou with its nearest gt > pos_iou_thr and the center of that gt falls inside the cell, assign it to that bbox 4. for each gt bbox, assign its nearest proposals within the cell the gt bbox falls in to itself. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assign result. 
""" gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels priors = pred_instances.priors responsible_flags = pred_instances.responsible_flags num_gts, num_priors = gt_bboxes.size(0), priors.size(0) # compute iou between all gt and priors overlaps = self.iou_calculator(gt_bboxes, priors) # 1. assign -1 by default assigned_gt_inds = overlaps.new_full((num_priors, ), -1, dtype=torch.long) if num_gts == 0 or num_priors == 0: # No ground truth or priors, return empty assignment max_overlaps = overlaps.new_zeros((num_priors, )) if num_gts == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 assigned_labels = overlaps.new_full((num_priors, ), -1, dtype=torch.long) return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) # 2. assign negative: below # for each anchor, which gt best overlaps with it # for each anchor, the max iou of all gts # shape of max_overlaps == argmax_overlaps == num_priors max_overlaps, argmax_overlaps = overlaps.max(dim=0) if isinstance(self.neg_iou_thr, float): assigned_gt_inds[(max_overlaps >= 0) & (max_overlaps <= self.neg_iou_thr)] = 0 elif isinstance(self.neg_iou_thr, (tuple, list)): assert len(self.neg_iou_thr) == 2 assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0]) & (max_overlaps <= self.neg_iou_thr[1])] = 0 # 3. assign positive: falls into responsible cell and above # positive IOU threshold, the order matters. # the prior condition of comparison is to filter out all # unrelated anchors, i.e. not responsible_flags overlaps[:, ~responsible_flags.type(torch.bool)] = -1. # calculate max_overlaps again, but this time we only consider IOUs # for anchors responsible for prediction max_overlaps, argmax_overlaps = overlaps.max(dim=0) # for each gt, which anchor best overlaps with it # for each gt, the max iou of all proposals # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) pos_inds = (max_overlaps > self.pos_iou_thr) & responsible_flags.type( torch.bool) assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 # 4. assign positive to max overlapped anchors within responsible cell for i in range(num_gts): if gt_max_overlaps[i] > self.min_pos_iou: if self.gt_max_assign_all: max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \ responsible_flags.type(torch.bool) assigned_gt_inds[max_iou_inds] = i + 1 elif responsible_flags[gt_argmax_overlaps[i]]: assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 # assign labels of positive anchors assigned_labels = assigned_gt_inds.new_full((num_priors, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1] return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
7,815
42.910112
79
py
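A toy walk-through of the responsible-cell constraint in GridAssigner above: IoUs of anchors whose grid cell does not contain a gt center are masked to -1 before the per-anchor argmax, so only responsible anchors can become positives. All numbers are made up for illustration.

import torch

# IoUs between 2 gts and 4 anchors, and a flag saying which anchors lie in a
# grid cell that contains some gt center (the "responsible" anchors)
overlaps = torch.tensor([[0.6, 0.4, 0.7, 0.1],
                         [0.2, 0.5, 0.3, 0.8]])
responsible_flags = torch.tensor([True, False, True, True])
pos_iou_thr = 0.5

masked = overlaps.clone()
masked[:, ~responsible_flags] = -1.0              # non-responsible anchors cannot be positive
max_overlaps, argmax_overlaps = masked.max(dim=0)

assigned_gt_inds = torch.zeros(4, dtype=torch.long)          # 0 = background
pos_inds = (max_overlaps > pos_iou_thr) & responsible_flags
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1   # 1-based gt index
print(assigned_gt_inds)  # tensor([1, 0, 1, 2])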
ERD
ERD-main/mmdet/models/task_modules/assigners/match_cost.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import abstractmethod from typing import Optional, Union import torch import torch.nn.functional as F from mmengine.structures import InstanceData from torch import Tensor from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import bbox_overlaps, bbox_xyxy_to_cxcywh class BaseMatchCost: """Base match cost class. Args: weight (Union[float, int]): Cost weight. Defaults to 1. """ def __init__(self, weight: Union[float, int] = 1.) -> None: self.weight = weight @abstractmethod def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). img_meta (dict, optional): Image information. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pass @TASK_UTILS.register_module() class BBoxL1Cost(BaseMatchCost): """BBoxL1Cost. Note: ``bboxes`` in ``InstanceData`` passed in is of format 'xyxy' and its coordinates are unnormalized. Args: box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN. Defaults to 'xyxy'. weight (Union[float, int]): Cost weight. Defaults to 1. Examples: >>> from mmdet.models.task_modules.assigners. ... match_costs.match_cost import BBoxL1Cost >>> import torch >>> self = BBoxL1Cost() >>> bbox_pred = torch.rand(1, 4) >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(bbox_pred, gt_bboxes, factor) tensor([[1.6172, 1.6422]]) """ def __init__(self, box_format: str = 'xyxy', weight: Union[float, int] = 1.) -> None: super().__init__(weight=weight) assert box_format in ['xyxy', 'xywh'] self.box_format = box_format def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): ``bboxes`` inside is predicted boxes with unnormalized coordinate (x, y, x, y). gt_instances (:obj:`InstanceData`): ``bboxes`` inside is gt bboxes with unnormalized coordinate (x, y, x, y). img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pred_bboxes = pred_instances.bboxes gt_bboxes = gt_instances.bboxes # convert box format if self.box_format == 'xywh': gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes) pred_bboxes = bbox_xyxy_to_cxcywh(pred_bboxes) # normalized img_h, img_w = img_meta['img_shape'] factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) gt_bboxes = gt_bboxes / factor pred_bboxes = pred_bboxes / factor bbox_cost = torch.cdist(pred_bboxes, gt_bboxes, p=1) return bbox_cost * self.weight @TASK_UTILS.register_module() class IoUCost(BaseMatchCost): """IoUCost. Note: ``bboxes`` in ``InstanceData`` passed in is of format 'xyxy' and its coordinates are unnormalized. Args: iou_mode (str): iou mode such as 'iou', 'giou'. Defaults to 'giou'. weight (Union[float, int]): Cost weight. Defaults to 1. 
Examples: >>> from mmdet.models.task_modules.assigners. ... match_costs.match_cost import IoUCost >>> import torch >>> self = IoUCost() >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]]) >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) >>> self(bboxes, gt_bboxes) tensor([[-0.1250, 0.1667], [ 0.1667, -0.5000]]) """ def __init__(self, iou_mode: str = 'giou', weight: Union[float, int] = 1.): super().__init__(weight=weight) self.iou_mode = iou_mode def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs): """Compute match cost. Args: pred_instances (:obj:`InstanceData`): ``bboxes`` inside is predicted boxes with unnormalized coordinate (x, y, x, y). gt_instances (:obj:`InstanceData`): ``bboxes`` inside is gt bboxes with unnormalized coordinate (x, y, x, y). img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pred_bboxes = pred_instances.bboxes gt_bboxes = gt_instances.bboxes overlaps = bbox_overlaps( pred_bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False) # The 1 is a constant that doesn't change the matching, so omitted. iou_cost = -overlaps return iou_cost * self.weight @TASK_UTILS.register_module() class ClassificationCost(BaseMatchCost): """ClsSoftmaxCost. Args: weight (Union[float, int]): Cost weight. Defaults to 1. Examples: >>> from mmdet.models.task_modules.assigners. ... match_costs.match_cost import ClassificationCost >>> import torch >>> self = ClassificationCost() >>> cls_pred = torch.rand(4, 3) >>> gt_labels = torch.tensor([0, 1, 2]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(cls_pred, gt_labels) tensor([[-0.3430, -0.3525, -0.3045], [-0.3077, -0.2931, -0.3992], [-0.3664, -0.3455, -0.2881], [-0.3343, -0.2701, -0.3956]]) """ def __init__(self, weight: Union[float, int] = 1) -> None: super().__init__(weight=weight) def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): ``scores`` inside is predicted classification logits, of shape (num_queries, num_class). gt_instances (:obj:`InstanceData`): ``labels`` inside should have shape (num_gt, ). img_meta (Optional[dict]): _description_. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pred_scores = pred_instances.scores gt_labels = gt_instances.labels pred_scores = pred_scores.softmax(-1) cls_cost = -pred_scores[:, gt_labels] return cls_cost * self.weight @TASK_UTILS.register_module() class FocalLossCost(BaseMatchCost): """FocalLossCost. Args: alpha (Union[float, int]): focal_loss alpha. Defaults to 0.25. gamma (Union[float, int]): focal_loss gamma. Defaults to 2. eps (float): Defaults to 1e-12. binary_input (bool): Whether the input is binary. Currently, binary_input = True is for masks input, binary_input = False is for label input. Defaults to False. weight (Union[float, int]): Cost weight. Defaults to 1. """ def __init__(self, alpha: Union[float, int] = 0.25, gamma: Union[float, int] = 2, eps: float = 1e-12, binary_input: bool = False, weight: Union[float, int] = 1.) -> None: super().__init__(weight=weight) self.alpha = alpha self.gamma = gamma self.eps = eps self.binary_input = binary_input def _focal_loss_cost(self, cls_pred: Tensor, gt_labels: Tensor) -> Tensor: """ Args: cls_pred (Tensor): Predicted classification logits, shape (num_queries, num_class). 
gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). Returns: torch.Tensor: cls_cost value with weight """ cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] return cls_cost * self.weight def _mask_focal_loss_cost(self, cls_pred, gt_labels) -> Tensor: """ Args: cls_pred (Tensor): Predicted classification logits. in shape (num_queries, d1, ..., dn), dtype=torch.float32. gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn), dtype=torch.long. Labels should be binary. Returns: Tensor: Focal cost matrix with weight in shape\ (num_queries, num_gt). """ cls_pred = cls_pred.flatten(1) gt_labels = gt_labels.flatten(1).float() n = cls_pred.shape[1] cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) return cls_cost / n * self.weight def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): Predicted instances which must contain ``scores`` or ``masks``. gt_instances (:obj:`InstanceData`): Ground truth which must contain ``labels`` or ``mask``. img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ if self.binary_input: pred_masks = pred_instances.masks gt_masks = gt_instances.masks return self._mask_focal_loss_cost(pred_masks, gt_masks) else: pred_scores = pred_instances.scores gt_labels = gt_instances.labels return self._focal_loss_cost(pred_scores, gt_labels) @TASK_UTILS.register_module() class DiceCost(BaseMatchCost): """Cost of mask assignments based on dice losses. Args: pred_act (bool): Whether to apply sigmoid to mask_pred. Defaults to False. eps (float): Defaults to 1e-3. naive_dice (bool): If True, use the naive dice loss in which the power of the number in the denominator is the first power. If False, use the second power that is adopted by K-Net and SOLO. Defaults to True. weight (Union[float, int]): Cost weight. Defaults to 1. """ def __init__(self, pred_act: bool = False, eps: float = 1e-3, naive_dice: bool = True, weight: Union[float, int] = 1.) -> None: super().__init__(weight=weight) self.pred_act = pred_act self.eps = eps self.naive_dice = naive_dice def _binary_mask_dice_loss(self, mask_preds: Tensor, gt_masks: Tensor) -> Tensor: """ Args: mask_preds (Tensor): Mask prediction in shape (num_queries, *). gt_masks (Tensor): Ground truth in shape (num_gt, *) store 0 or 1, 0 for negative class and 1 for positive class. Returns: Tensor: Dice cost matrix in shape (num_queries, num_gt). 
""" mask_preds = mask_preds.flatten(1) gt_masks = gt_masks.flatten(1).float() numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks) if self.naive_dice: denominator = mask_preds.sum(-1)[:, None] + \ gt_masks.sum(-1)[None, :] else: denominator = mask_preds.pow(2).sum(1)[:, None] + \ gt_masks.pow(2).sum(1)[None, :] loss = 1 - (numerator + self.eps) / (denominator + self.eps) return loss def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): Predicted instances which must contain ``masks``. gt_instances (:obj:`InstanceData`): Ground truth which must contain ``mask``. img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pred_masks = pred_instances.masks gt_masks = gt_instances.masks if self.pred_act: pred_masks = pred_masks.sigmoid() dice_cost = self._binary_mask_dice_loss(pred_masks, gt_masks) return dice_cost * self.weight @TASK_UTILS.register_module() class CrossEntropyLossCost(BaseMatchCost): """CrossEntropyLossCost. Args: use_sigmoid (bool): Whether the prediction uses sigmoid of softmax. Defaults to True. weight (Union[float, int]): Cost weight. Defaults to 1. """ def __init__(self, use_sigmoid: bool = True, weight: Union[float, int] = 1.) -> None: super().__init__(weight=weight) self.use_sigmoid = use_sigmoid def _binary_cross_entropy(self, cls_pred: Tensor, gt_labels: Tensor) -> Tensor: """ Args: cls_pred (Tensor): The prediction with shape (num_queries, 1, *) or (num_queries, *). gt_labels (Tensor): The learning label of prediction with shape (num_gt, *). Returns: Tensor: Cross entropy cost matrix in shape (num_queries, num_gt). """ cls_pred = cls_pred.flatten(1).float() gt_labels = gt_labels.flatten(1).float() n = cls_pred.shape[1] pos = F.binary_cross_entropy_with_logits( cls_pred, torch.ones_like(cls_pred), reduction='none') neg = F.binary_cross_entropy_with_logits( cls_pred, torch.zeros_like(cls_pred), reduction='none') cls_cost = torch.einsum('nc,mc->nm', pos, gt_labels) + \ torch.einsum('nc,mc->nm', neg, 1 - gt_labels) cls_cost = cls_cost / n return cls_cost def __call__(self, pred_instances: InstanceData, gt_instances: InstanceData, img_meta: Optional[dict] = None, **kwargs) -> Tensor: """Compute match cost. Args: pred_instances (:obj:`InstanceData`): Predicted instances which must contain ``scores`` or ``masks``. gt_instances (:obj:`InstanceData`): Ground truth which must contain ``labels`` or ``masks``. img_meta (Optional[dict]): Image information. Defaults to None. Returns: Tensor: Match Cost matrix of shape (num_preds, num_gts). """ pred_masks = pred_instances.masks gt_masks = gt_instances.masks if self.use_sigmoid: cls_cost = self._binary_cross_entropy(pred_masks, gt_masks) else: raise NotImplementedError return cls_cost * self.weight
16,863
35.344828
79
py
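A minimal usage sketch for the match costs defined in match_cost.py above, assuming the classes are importable from the assigners package (its __init__.py below exports them); all tensor values and weights here are made-up toy data, not values from any real config.

# Toy data only: 10 queries, 80 classes, 2 gt boxes.
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import (BBoxL1Cost, FocalLossCost,
                                                 IoUCost)

cls_cost = FocalLossCost(weight=2.0)
reg_cost = BBoxL1Cost(weight=5.0)          # expects unnormalized xyxy boxes
iou_cost = IoUCost(iou_mode='giou', weight=2.0)

xy = torch.rand(10, 2) * 60
wh = torch.rand(10, 2) * 40
pred_instances = InstanceData(
    scores=torch.randn(10, 80),            # classification logits
    bboxes=torch.cat([xy, xy + wh], dim=1))
gt_instances = InstanceData(
    labels=torch.tensor([3, 7]),
    bboxes=torch.tensor([[10., 10., 50., 60.], [30., 40., 80., 90.]]))
img_meta = dict(img_shape=(100, 100))

cost = sum(
    c(pred_instances=pred_instances, gt_instances=gt_instances,
      img_meta=img_meta)
    for c in (cls_cost, reg_cost, iou_cost))
print(cost.shape)                          # torch.Size([10, 2])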
ERD
ERD-main/mmdet/models/task_modules/assigners/hungarian_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Union

import torch
from mmengine import ConfigDict
from mmengine.structures import InstanceData
from scipy.optimize import linear_sum_assignment
from torch import Tensor

from mmdet.registry import TASK_UTILS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@TASK_UTILS.register_module()
class HungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth.

    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are weighted sum of some components.
    For DETR the costs are weighted sum of classification cost, regression L1
    cost and regression iou cost. The targets don't include the no_object, so
    generally there are more predictions than targets. After the one-to-one
    matching, the un-matched are treated as backgrounds. Thus each query
    prediction will be assigned with `0` or a positive integer indicating the
    ground truth index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        match_costs (:obj:`ConfigDict` or dict or \
            List[Union[:obj:`ConfigDict`, dict]]): Match cost configs.
    """

    def __init__(
        self, match_costs: Union[List[Union[dict, ConfigDict]], dict,
                                 ConfigDict]
    ) -> None:

        if isinstance(match_costs, dict):
            match_costs = [match_costs]
        elif isinstance(match_costs, list):
            assert len(match_costs) > 0, \
                'match_costs must not be an empty list.'

        self.match_costs = [
            TASK_UTILS.build(match_cost) for match_cost in match_costs
        ]

    def assign(self,
               pred_instances: InstanceData,
               gt_instances: InstanceData,
               img_meta: Optional[dict] = None,
               **kwargs) -> AssignResult:
        """Computes one-to-one matching based on the weighted costs.

        This method assigns each query prediction to a ground truth or
        background. The `assigned_gt_inds` with -1 means don't care,
        0 means negative sample, and positive number is the index (1-based)
        of assigned gt.
        The assignment is done in the following steps, the order matters.

        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.

        Args:
            pred_instances (:obj:`InstanceData`): Instances of model
                predictions. It includes ``priors``, and the priors can
                be anchors or points, or the bboxes predicted by the
                previous stage, has shape (n, 4). The bboxes predicted by
                the current model or stage will be named ``bboxes``,
                ``labels``, and ``scores``, the same as the ``InstanceData``
                in other places. It may include ``masks``, with shape
                (n, h, w) or (n, l).
            gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It usually includes ``bboxes``, with shape
                (k, 4), ``labels``, with shape (k, ) and ``masks``, with
                shape (k, h, w) or (k, l).
            img_meta (dict): Image information.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert isinstance(gt_instances.labels, Tensor)
        num_gts, num_preds = len(gt_instances), len(pred_instances)
        gt_labels = gt_instances.labels
        device = gt_labels.device

        # 1. assign -1 by default
        assigned_gt_inds = torch.full((num_preds, ),
                                      -1,
                                      dtype=torch.long,
                                      device=device)
        assigned_labels = torch.full((num_preds, ),
                                     -1,
                                     dtype=torch.long,
                                     device=device)

        if num_gts == 0 or num_preds == 0:
            # No ground truth or boxes, return empty assignment
            if num_gts == 0:
                # No ground truth, assign all to background
                assigned_gt_inds[:] = 0
            return AssignResult(
                num_gts=num_gts,
                gt_inds=assigned_gt_inds,
                max_overlaps=None,
                labels=assigned_labels)

        # 2. compute weighted cost
        cost_list = []
        for match_cost in self.match_costs:
            cost = match_cost(
                pred_instances=pred_instances,
                gt_instances=gt_instances,
                img_meta=img_meta)
            cost_list.append(cost)
        cost = torch.stack(cost_list).sum(dim=0)

        # 3. do Hungarian matching on CPU using linear_sum_assignment
        cost = cost.detach().cpu()
        if linear_sum_assignment is None:
            raise ImportError('Please run "pip install scipy" '
                              'to install scipy first.')

        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(device)

        # 4. assign backgrounds and foregrounds
        # assign all indices to backgrounds first
        assigned_gt_inds[:] = 0
        # assign foregrounds based on matching results
        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(
            num_gts=num_gts,
            gt_inds=assigned_gt_inds,
            max_overlaps=None,
            labels=assigned_labels)
6,075
40.616438
79
py
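A hedged end-to-end sketch of the HungarianAssigner above with a DETR-style cost combination; the weights, shapes, and boxes are illustrative, and building costs from `type` strings assumes the mmdet registries have been populated by the imports.

# Toy data: match 5 query predictions to 2 gt boxes one-to-one.
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import HungarianAssigner

assigner = HungarianAssigner(match_costs=[
    dict(type='ClassificationCost', weight=1.),
    dict(type='BBoxL1Cost', weight=5.),
    dict(type='IoUCost', iou_mode='giou', weight=2.),
])

xy = torch.rand(5, 2) * 50
wh = torch.rand(5, 2) * 30
pred_instances = InstanceData(
    scores=torch.randn(5, 80),                # classification logits
    bboxes=torch.cat([xy, xy + wh], dim=1))   # unnormalized xyxy
gt_instances = InstanceData(
    labels=torch.tensor([1, 42]),
    bboxes=torch.tensor([[5., 5., 30., 40.], [20., 25., 60., 70.]]))

result = assigner.assign(pred_instances, gt_instances,
                         img_meta=dict(img_shape=(100, 100)))
print(result.gt_inds)    # 0 = background, i + 1 = matched to the i-th gt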
ERD
ERD-main/mmdet/models/task_modules/assigners/task_aligned_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional import torch from mmengine.structures import InstanceData from mmdet.registry import TASK_UTILS from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner INF = 100000000 @TASK_UTILS.register_module() class TaskAlignedAssigner(BaseAssigner): """Task aligned assigner used in the paper: `TOOD: Task-aligned One-stage Object Detection. <https://arxiv.org/abs/2108.07755>`_. Assign a corresponding gt bbox or background to each predicted bbox. Each bbox will be assigned with `0` or a positive integer indicating the ground truth index. - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: topk (int): number of bbox selected in each level iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou calculator. Defaults to ``dict(type='BboxOverlaps2D')`` """ def __init__(self, topk: int, iou_calculator: ConfigType = dict(type='BboxOverlaps2D')): assert topk >= 1 self.topk = topk self.iou_calculator = TASK_UTILS.build(iou_calculator) def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, alpha: int = 1, beta: int = 6) -> AssignResult: """Assign gt to bboxes. The assignment is done in following steps 1. compute alignment metric between all bbox (bbox of all pyramid levels) and gt 2. select top-k bbox as candidates for each gt 3. limit the positive sample's center in gt (because the anchor-free detector only can predict positive distance) Args: pred_instances (:obj:`InstaceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors, points, or bboxes predicted by the model, shape(n, 4). gt_instances (:obj:`InstaceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. gt_instances_ignore (:obj:`InstaceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. alpha (int): Hyper-parameters related to alignment_metrics. Defaults to 1. beta (int): Hyper-parameters related to alignment_metrics. Defaults to 6. Returns: :obj:`TaskAlignedAssignResult`: The assign result. 
""" priors = pred_instances.priors decode_bboxes = pred_instances.bboxes pred_scores = pred_instances.scores gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels priors = priors[:, :4] num_gt, num_bboxes = gt_bboxes.size(0), priors.size(0) # compute alignment metric between all bbox and gt overlaps = self.iou_calculator(decode_bboxes, gt_bboxes).detach() bbox_scores = pred_scores[:, gt_labels].detach() # assign 0 by default assigned_gt_inds = priors.new_full((num_bboxes, ), 0, dtype=torch.long) assign_metrics = priors.new_zeros((num_bboxes, )) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = priors.new_zeros((num_bboxes, )) if num_gt == 0: # No gt boxes, assign everything to background assigned_gt_inds[:] = 0 assigned_labels = priors.new_full((num_bboxes, ), -1, dtype=torch.long) assign_result = AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) assign_result.assign_metrics = assign_metrics return assign_result # select top-k bboxes as candidates for each gt alignment_metrics = bbox_scores**alpha * overlaps**beta topk = min(self.topk, alignment_metrics.size(0)) _, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True) candidate_metrics = alignment_metrics[candidate_idxs, torch.arange(num_gt)] is_pos = candidate_metrics > 0 # limit the positive sample's center in gt priors_cx = (priors[:, 0] + priors[:, 2]) / 2.0 priors_cy = (priors[:, 1] + priors[:, 3]) / 2.0 for gt_idx in range(num_gt): candidate_idxs[:, gt_idx] += gt_idx * num_bboxes ep_priors_cx = priors_cx.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) ep_priors_cy = priors_cy.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) candidate_idxs = candidate_idxs.view(-1) # calculate the left, top, right, bottom distance between positive # bbox center and gt side l_ = ep_priors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] t_ = ep_priors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - ep_priors_cx[candidate_idxs].view(-1, num_gt) b_ = gt_bboxes[:, 3] - ep_priors_cy[candidate_idxs].view(-1, num_gt) is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 is_pos = is_pos & is_in_gts # if an anchor box is assigned to multiple gts, # the one with the highest iou will be selected. overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) index = candidate_idxs.view(-1)[is_pos.view(-1)] overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] overlaps_inf = overlaps_inf.view(num_gt, -1).t() max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) assigned_gt_inds[ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 assign_metrics[max_overlaps != -INF] = alignment_metrics[ max_overlaps != -INF, argmax_overlaps[max_overlaps != -INF]] assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1] assign_result = AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) assign_result.assign_metrics = assign_metrics return assign_result
6,966
42.81761
79
py
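The core of the TaskAlignedAssigner above is the alignment metric t = s**alpha * u**beta that mixes classification score and IoU; a tiny torch-only illustration with the default alpha=1, beta=6 (candidate values are made up).

# Three candidate priors competing for one gt box.
import torch

alpha, beta = 1, 6
cls_scores = torch.tensor([0.9, 0.6, 0.3])  # score predicted for the gt class
ious = torch.tensor([0.5, 0.8, 0.2])        # IoU of the decoded box with the gt

alignment = cls_scores**alpha * ious**beta
print(alignment)                 # roughly [0.0141, 0.1573, 0.0000]
print(alignment.topk(2)[1])      # indices of the top-k candidates kept per gt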
ERD
ERD-main/mmdet/models/task_modules/assigners/base_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Optional

from mmengine.structures import InstanceData


class BaseAssigner(metaclass=ABCMeta):
    """Base assigner that assigns boxes to ground truth boxes."""

    @abstractmethod
    def assign(self,
               pred_instances: InstanceData,
               gt_instances: InstanceData,
               gt_instances_ignore: Optional[InstanceData] = None,
               **kwargs):
        """Assign boxes to either a ground truth box or a negative box."""
571
30.777778
78
py
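The abstract class above only fixes the `assign` interface; a minimal, purely hypothetical subclass to show what implementers provide:

# Hypothetical toy subclass; it is not part of mmdet, only an illustration.
import torch
from mmdet.models.task_modules.assigners import AssignResult, BaseAssigner


class EverythingBackgroundAssigner(BaseAssigner):
    """Toy assigner that marks every prediction as background."""

    def assign(self, pred_instances, gt_instances,
               gt_instances_ignore=None, **kwargs):
        num_preds = len(pred_instances)
        gt_inds = torch.zeros(num_preds, dtype=torch.long)        # all background
        labels = torch.full((num_preds, ), -1, dtype=torch.long)  # no class
        return AssignResult(
            num_gts=len(gt_instances), gt_inds=gt_inds,
            max_overlaps=None, labels=labels)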
ERD
ERD-main/mmdet/models/task_modules/assigners/uniform_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional import torch from mmengine.structures import InstanceData from mmdet.registry import TASK_UTILS from mmdet.structures.bbox import bbox_xyxy_to_cxcywh from mmdet.utils import ConfigType from .assign_result import AssignResult from .base_assigner import BaseAssigner @TASK_UTILS.register_module() class UniformAssigner(BaseAssigner): """Uniform Matching between the priors and gt boxes, which can achieve balance in positive priors, and gt_bboxes_ignore was not considered for now. Args: pos_ignore_thr (float): the threshold to ignore positive priors neg_ignore_thr (float): the threshold to ignore negative priors match_times(int): Number of positive priors for each gt box. Defaults to 4. iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou calculator. Defaults to ``dict(type='BboxOverlaps2D')`` """ def __init__(self, pos_ignore_thr: float, neg_ignore_thr: float, match_times: int = 4, iou_calculator: ConfigType = dict(type='BboxOverlaps2D')): self.match_times = match_times self.pos_ignore_thr = pos_ignore_thr self.neg_ignore_thr = neg_ignore_thr self.iou_calculator = TASK_UTILS.build(iou_calculator) def assign( self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None ) -> AssignResult: """Assign gt to priors. The assignment is done in following steps 1. assign -1 by default 2. compute the L1 cost between boxes. Note that we use priors and predict boxes both 3. compute the ignore indexes use gt_bboxes and predict boxes 4. compute the ignore indexes of positive sample use priors and predict boxes Args: pred_instances (:obj:`InstaceData`): Instances of model predictions. It includes ``priors``, and the priors can be priors, points, or bboxes predicted by the model, shape(n, 4). gt_instances (:obj:`InstaceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. gt_instances_ignore (:obj:`InstaceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assign result. """ gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels priors = pred_instances.priors bbox_pred = pred_instances.decoder_priors num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) # 1. assign -1 by default assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), 0, dtype=torch.long) assigned_labels = bbox_pred.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment if num_gts == 0: # No ground truth, assign all to background assigned_gt_inds[:] = 0 assign_result = AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels) assign_result.set_extra_property( 'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool)) assign_result.set_extra_property('pos_predicted_boxes', bbox_pred.new_empty((0, 4))) assign_result.set_extra_property('target_boxes', bbox_pred.new_empty((0, 4))) return assign_result # 2. Compute the L1 cost between boxes # Note that we use priors and predict boxes both cost_bbox = torch.cdist( bbox_xyxy_to_cxcywh(bbox_pred), bbox_xyxy_to_cxcywh(gt_bboxes), p=1) cost_bbox_priors = torch.cdist( bbox_xyxy_to_cxcywh(priors), bbox_xyxy_to_cxcywh(gt_bboxes), p=1) # We found that topk function has different results in cpu and # cuda mode. In order to ensure consistency with the source code, # we also use cpu mode. 
# TODO: Check whether the performance of cpu and cuda are the same. C = cost_bbox.cpu() C1 = cost_bbox_priors.cpu() # self.match_times x n index = torch.topk( C, # c=b,n,x c[i]=n,x k=self.match_times, dim=0, largest=False)[1] # self.match_times x n index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1] # (self.match_times*2) x n indexes = torch.cat((index, index1), dim=1).reshape(-1).to(bbox_pred.device) pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes) anchor_overlaps = self.iou_calculator(priors, gt_bboxes) pred_max_overlaps, _ = pred_overlaps.max(dim=1) anchor_max_overlaps, _ = anchor_overlaps.max(dim=0) # 3. Compute the ignore indexes use gt_bboxes and predict boxes ignore_idx = pred_max_overlaps > self.neg_ignore_thr assigned_gt_inds[ignore_idx] = -1 # 4. Compute the ignore indexes of positive sample use priors # and predict boxes pos_gt_index = torch.arange( 0, C1.size(1), device=bbox_pred.device).repeat(self.match_times * 2) pos_ious = anchor_overlaps[indexes, pos_gt_index] pos_ignore_idx = pos_ious < self.pos_ignore_thr pos_gt_index_with_ignore = pos_gt_index + 1 pos_gt_index_with_ignore[pos_ignore_idx] = -1 assigned_gt_inds[indexes] = pos_gt_index_with_ignore if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None assign_result = AssignResult( num_gts, assigned_gt_inds, anchor_max_overlaps, labels=assigned_labels) assign_result.set_extra_property('pos_idx', ~pos_ignore_idx) assign_result.set_extra_property('pos_predicted_boxes', bbox_pred[indexes]) assign_result.set_extra_property('target_boxes', gt_bboxes[pos_gt_index]) return assign_result
7,092
39.764368
77
py
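A torch-only sketch of the top-k L1-cost selection that UniformAssigner above performs on both the anchors and the decoded boxes; shapes and values are illustrative and the bookkeeping is simplified.

# 8 priors, 3 gt boxes, match_times = 2: each gt gets 2 candidates from the
# decoded boxes and 2 from the raw anchors.
import torch

match_times = 2
pred_cxcywh = torch.rand(8, 4)    # decoded boxes in cxcywh
prior_cxcywh = torch.rand(8, 4)   # anchor boxes in cxcywh
gt_cxcywh = torch.rand(3, 4)      # gt boxes in cxcywh

cost_pred = torch.cdist(pred_cxcywh, gt_cxcywh, p=1)    # (8, 3)
cost_prior = torch.cdist(prior_cxcywh, gt_cxcywh, p=1)  # (8, 3)

idx_pred = torch.topk(cost_pred, k=match_times, dim=0, largest=False)[1]
idx_prior = torch.topk(cost_prior, k=match_times, dim=0, largest=False)[1]
pos_inds = torch.cat([idx_pred, idx_prior], dim=0)      # (2 * match_times, 3)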
ERD
ERD-main/mmdet/models/task_modules/assigners/point_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional import torch from mmengine.structures import InstanceData from mmdet.registry import TASK_UTILS from .assign_result import AssignResult from .base_assigner import BaseAssigner @TASK_UTILS.register_module() class PointAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each point. Each proposals will be assigned with `0`, or a positive integer indicating the ground truth index. - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt """ def __init__(self, scale: int = 4, pos_num: int = 3) -> None: self.scale = scale self.pos_num = pos_num def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to points. This method assign a gt bbox to every points set, each points set will be assigned with the background_label (-1), or a label number. -1 is background, and semi-positive number is the index (0-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every points to the background_label (-1) 2. A point is assigned to some gt bbox if (i) the point is within the k closest points to the gt bbox (ii) the distance between this point and the gt is smaller than other gt bboxes Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). The bboxes predicted by the current model or stage will be named ``bboxes``, ``labels``, and ``scores``, the same as the ``InstanceData`` in other places. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assign result. """ gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels # points to be assigned, shape(n, 3) while last # dimension stands for (x, y, stride). points = pred_instances.priors num_points = points.shape[0] num_gts = gt_bboxes.shape[0] if num_gts == 0 or num_points == 0: # If no truth assign everything to the background assigned_gt_inds = points.new_full((num_points, ), 0, dtype=torch.long) assigned_labels = points.new_full((num_points, ), -1, dtype=torch.long) return AssignResult( num_gts=num_gts, gt_inds=assigned_gt_inds, max_overlaps=None, labels=assigned_labels) points_xy = points[:, :2] points_stride = points[:, 2] points_lvl = torch.log2( points_stride).int() # [3...,4...,5...,6...,7...] 
lvl_min, lvl_max = points_lvl.min(), points_lvl.max() # assign gt box gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2 gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6) scale = self.scale gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) + torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int() gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max) # stores the assigned gt index of each point assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long) # stores the assigned gt dist (to this point) of each point assigned_gt_dist = points.new_full((num_points, ), float('inf')) points_range = torch.arange(points.shape[0]) for idx in range(num_gts): gt_lvl = gt_bboxes_lvl[idx] # get the index of points in this level lvl_idx = gt_lvl == points_lvl points_index = points_range[lvl_idx] # get the points in this level lvl_points = points_xy[lvl_idx, :] # get the center point of gt gt_point = gt_bboxes_xy[[idx], :] # get width and height of gt gt_wh = gt_bboxes_wh[[idx], :] # compute the distance between gt center and # all points in this level points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1) # find the nearest k points to gt center in this level min_dist, min_dist_index = torch.topk( points_gt_dist, self.pos_num, largest=False) # the index of nearest k points to gt center in this level min_dist_points_index = points_index[min_dist_index] # The less_than_recorded_index stores the index # of min_dist that is less then the assigned_gt_dist. Where # assigned_gt_dist stores the dist from previous assigned gt # (if exist) to each point. less_than_recorded_index = min_dist < assigned_gt_dist[ min_dist_points_index] # The min_dist_points_index stores the index of points satisfy: # (1) it is k nearest to current gt center in this level. # (2) it is closer to current gt center than other gt center. min_dist_points_index = min_dist_points_index[ less_than_recorded_index] # assign the result assigned_gt_inds[min_dist_points_index] = idx + 1 assigned_gt_dist[min_dist_points_index] = min_dist[ less_than_recorded_index] assigned_labels = assigned_gt_inds.new_full((num_points, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1] return AssignResult( num_gts=num_gts, gt_inds=assigned_gt_inds, max_overlaps=None, labels=assigned_labels)
6,884
43.134615
79
py
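A short sketch of the scale-to-level mapping PointAssigner above uses to decide which pyramid level each gt box draws its candidate points from (toy boxes, default scale=4).

# Two gt boxes: a small one and a larger one.
import torch

scale = 4
gt_bboxes = torch.tensor([[0., 0., 32., 32.], [10., 10., 200., 150.]])
gt_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
gt_lvl = ((torch.log2(gt_wh[:, 0] / scale) +
           torch.log2(gt_wh[:, 1] / scale)) / 2).int()
print(gt_lvl)   # tensor([3, 5], dtype=torch.int32): small boxes -> finer levels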
ERD
ERD-main/mmdet/models/task_modules/assigners/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .dynamic_soft_label_assigner import DynamicSoftLabelAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .iou2d_calculator import BboxOverlaps2D
from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,
                         DiceCost, FocalLossCost, IoUCost)
from .max_iou_assigner import MaxIoUAssigner
from .multi_instance_assigner import MultiInstanceAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner

__all__ = [
    'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
    'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
    'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
    'TaskAlignedAssigner', 'BBoxL1Cost', 'ClassificationCost',
    'CrossEntropyLossCost', 'DiceCost', 'FocalLossCost', 'IoUCost',
    'BboxOverlaps2D', 'DynamicSoftLabelAssigner', 'MultiInstanceAssigner'
]
1,413
47.758621
79
py
ERD
ERD-main/mmdet/models/task_modules/assigners/approx_max_iou_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Union import torch from mmengine.config import ConfigDict from mmengine.structures import InstanceData from mmdet.registry import TASK_UTILS from .assign_result import AssignResult from .max_iou_assigner import MaxIoUAssigner @TASK_UTILS.register_module() class ApproxMaxIoUAssigner(MaxIoUAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with an integer indicating the ground truth index. (semi-positive index: gt label (0-based), -1: background) - -1: negative sample, no assigned gt - semi-positive integer: positive sample, index (0-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. ignore_wrt_candidates (bool): Whether to compute the iof between `bboxes` and `gt_bboxes_ignore`, or the contrary. match_low_quality (bool): Whether to allow quality matches. This is usually allowed for RPN and single stage detectors, but not allowed in the second stage. gpu_assign_thr (int): The upper bound of the number of GT for GPU assign. When the number of gt is above this threshold, will assign on CPU device. Negative values mean not assign on CPU. iou_calculator (:obj:`ConfigDict` or dict): Config of overlaps Calculator. """ def __init__( self, pos_iou_thr: float, neg_iou_thr: Union[float, tuple], min_pos_iou: float = .0, gt_max_assign_all: bool = True, ignore_iof_thr: float = -1, ignore_wrt_candidates: bool = True, match_low_quality: bool = True, gpu_assign_thr: int = -1, iou_calculator: Union[ConfigDict, dict] = dict(type='BboxOverlaps2D') ) -> None: self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.ignore_iof_thr = ignore_iof_thr self.ignore_wrt_candidates = ignore_wrt_candidates self.gpu_assign_thr = gpu_assign_thr self.match_low_quality = match_low_quality self.iou_calculator = TASK_UTILS.build(iou_calculator) def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData] = None, **kwargs) -> AssignResult: """Assign gt to approxs. This method assign a gt bbox to each group of approxs (bboxes), each group of approxs is represent by a base approx (bbox) and will be assigned with -1, or a semi-positive number. background_label (-1) means negative sample, semi-positive number is the index (0-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to background_label (-1) 2. use the max IoU of each group of approxs to assign 2. assign proposals whose iou with all gts < neg_iou_thr to background 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, assign it to that bbox 4. for each gt bbox, assign its nearest proposals (may be more than one) to itself Args: pred_instances (:obj:`InstanceData`): Instances of model predictions. 
It includes ``priors``, and the priors can be anchors or points, or the bboxes predicted by the previous stage, has shape (n, 4). ``approxs`` means the group of approxs aligned with ``priors``, has shape (n, num_approxs, 4). gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes``, with shape (k, 4), and ``labels``, with shape (k, ). gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: :obj:`AssignResult`: The assign result. """ squares = pred_instances.priors approxs = pred_instances.approxs gt_bboxes = gt_instances.bboxes gt_labels = gt_instances.labels gt_bboxes_ignore = None if gt_instances_ignore is None else \ gt_instances_ignore.get('bboxes', None) approxs_per_octave = approxs.size(1) num_squares = squares.size(0) num_gts = gt_bboxes.size(0) if num_squares == 0 or num_gts == 0: # No predictions and/or truth, return empty assignment overlaps = approxs.new(num_gts, num_squares) assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) return assign_result # re-organize anchors by approxs_per_octave x num_squares approxs = torch.transpose(approxs, 0, 1).contiguous().view(-1, 4) assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( num_gts > self.gpu_assign_thr) else False # compute overlap and assign gt on CPU when number of GT is large if assign_on_cpu: device = approxs.device approxs = approxs.cpu() gt_bboxes = gt_bboxes.cpu() if gt_bboxes_ignore is not None: gt_bboxes_ignore = gt_bboxes_ignore.cpu() if gt_labels is not None: gt_labels = gt_labels.cpu() all_overlaps = self.iou_calculator(approxs, gt_bboxes) overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares, num_gts).max(dim=0) overlaps = torch.transpose(overlaps, 0, 1) if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0): if self.ignore_wrt_candidates: ignore_overlaps = self.iou_calculator( squares, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) else: ignore_overlaps = self.iou_calculator( gt_bboxes_ignore, squares, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) if assign_on_cpu: assign_result.gt_inds = assign_result.gt_inds.to(device) assign_result.max_overlaps = assign_result.max_overlaps.to(device) if assign_result.labels is not None: assign_result.labels = assign_result.labels.to(device) return assign_result
7,510
45.079755
79
py
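The distinguishing step of ApproxMaxIoUAssigner above is taking, for every square, the best overlap over its group of approxs before running the usual max-IoU assignment; a toy reduction, assuming 3 approxs per square and 2 gts.

# 4 squares x 3 approxs each, flattened to 12 boxes as in the code above.
import torch
from mmdet.structures.bbox import bbox_overlaps

approxs_per_octave, num_squares, num_gts = 3, 4, 2
xy = torch.rand(approxs_per_octave * num_squares, 2) * 50
wh = torch.rand(approxs_per_octave * num_squares, 2) * 30
approxs = torch.cat([xy, xy + wh], dim=1)                 # (12, 4) xyxy
gt_bboxes = torch.tensor([[5., 5., 40., 40.], [20., 20., 70., 60.]])

all_overlaps = bbox_overlaps(approxs, gt_bboxes)          # (12, 2)
overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
                                num_gts).max(dim=0)       # (4, 2): best approx
overlaps = overlaps.t()                                   # (num_gts, num_squares)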
ERD
ERD-main/mmdet/models/task_modules/assigners/iou2d_calculator.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox_overlaps, get_box_tensor


def cast_tensor_type(x, scale=1., dtype=None):
    if dtype == 'fp16':
        # scale is for preventing overflows
        x = (x / scale).half()
    return x


@TASK_UTILS.register_module()
class BboxOverlaps2D:
    """2D Overlaps (e.g. IoUs, GIoUs) Calculator."""

    def __init__(self, scale=1., dtype=None):
        self.scale = scale
        self.dtype = dtype

    def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
        """Calculate IoU between 2D bboxes.

        Args:
            bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)
                in <x1, y1, x2, y2> format, or shape (m, 5) in
                <x1, y1, x2, y2, score> format.
            bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (n, 4)
                in <x1, y1, x2, y2> format, shape (n, 5) in
                <x1, y1, x2, y2, score> format, or be empty. If
                ``is_aligned`` is ``True``, then m and n must be equal.
            mode (str): "iou" (intersection over union), "iof" (intersection
                over foreground), or "giou" (generalized intersection over
                union).
            is_aligned (bool, optional): If True, then m and n must be equal.
                Default False.

        Returns:
            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
        """
        bboxes1 = get_box_tensor(bboxes1)
        bboxes2 = get_box_tensor(bboxes2)
        assert bboxes1.size(-1) in [0, 4, 5]
        assert bboxes2.size(-1) in [0, 4, 5]
        if bboxes2.size(-1) == 5:
            bboxes2 = bboxes2[..., :4]
        if bboxes1.size(-1) == 5:
            bboxes1 = bboxes1[..., :4]

        if self.dtype == 'fp16':
            # change tensor type to save cpu and cuda memory and keep speed
            bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)
            bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)
            overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
            if not overlaps.is_cuda and overlaps.dtype == torch.float16:
                # resume cpu float32
                overlaps = overlaps.float()
            return overlaps

        return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)

    def __repr__(self):
        """str: a string describing the module"""
        repr_str = self.__class__.__name__ + f'(' \
            f'scale={self.scale}, dtype={self.dtype})'
        return repr_str
2,616
36.927536
77
py
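A small usage sketch of the BboxOverlaps2D calculator above on hand-written boxes:

# Two predicted boxes against two gt boxes.
import torch
from mmdet.models.task_modules.assigners import BboxOverlaps2D

iou_calculator = BboxOverlaps2D()
bboxes1 = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
bboxes2 = torch.tensor([[0., 0., 10., 10.], [100., 100., 110., 110.]])

ious = iou_calculator(bboxes1, bboxes2, mode='iou')       # shape (2, 2)
print(ious[0, 0])    # 1.0 -- identical boxes
gious = iou_calculator(bboxes1, bboxes2, mode='giou')     # can be negative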