ERD-main/mmdet/models/task_modules/assigners/max_iou_assigner.py

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
import torch
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import TASK_UTILS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@TASK_UTILS.register_module()
class MaxIoUAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
    Each proposal will be assigned with `-1` or a semi-positive integer
indicating the ground truth index.
- -1: negative sample, no assigned gt
- semi-positive integer: positive sample, index (0-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
`min_pos_iou` is set to avoid assigning bboxes that have extremely
small iou with GT as positive samples. It brings about 0.3 mAP
improvements in 1x schedule but does not affect the performance of
3x schedule. More comparisons can be found in
`PR #7464 <https://github.com/open-mmlab/mmdetection/pull/7464>`_.
gt_max_assign_all (bool): Whether to assign all bboxes with the same
highest overlap with some gt to that gt.
ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
`gt_bboxes_ignore` is specified). Negative values mean not
ignoring any bboxes.
ignore_wrt_candidates (bool): Whether to compute the iof between
`bboxes` and `gt_bboxes_ignore`, or the contrary.
match_low_quality (bool): Whether to allow low quality matches. This is
usually allowed for RPN and single stage detectors, but not allowed
in the second stage. Details are demonstrated in Step 4.
        gpu_assign_thr (int): The upper bound of the number of GTs for GPU
            assignment. When the number of gts is above this threshold, the
            assignment is done on CPU. Negative values mean the assignment is
            never moved to CPU.
iou_calculator (dict): Config of overlaps Calculator.
"""
def __init__(self,
pos_iou_thr: float,
neg_iou_thr: Union[float, tuple],
min_pos_iou: float = .0,
gt_max_assign_all: bool = True,
ignore_iof_thr: float = -1,
ignore_wrt_candidates: bool = True,
match_low_quality: bool = True,
gpu_assign_thr: float = -1,
iou_calculator: dict = dict(type='BboxOverlaps2D')):
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
self.min_pos_iou = min_pos_iou
self.gt_max_assign_all = gt_max_assign_all
self.ignore_iof_thr = ignore_iof_thr
self.ignore_wrt_candidates = ignore_wrt_candidates
self.gpu_assign_thr = gpu_assign_thr
self.match_low_quality = match_low_quality
self.iou_calculator = TASK_UTILS.build(iou_calculator)
def assign(self,
pred_instances: InstanceData,
gt_instances: InstanceData,
gt_instances_ignore: Optional[InstanceData] = None,
**kwargs) -> AssignResult:
"""Assign gt to bboxes.
        This method assigns a gt bbox to every bbox (proposal/anchor), each bbox
will be assigned with -1, or a semi-positive number. -1 means negative
sample, semi-positive number is the index (0-based) of assigned gt.
        The assignment is done in the following steps; the order matters.
1. assign every bbox to the background
2. assign proposals whose iou with all gts < neg_iou_thr to 0
3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that gt
4. for each gt bbox, assign its nearest proposals (may be more than
one) to itself
Args:
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors or points, or the bboxes predicted by the
previous stage, has shape (n, 4). The bboxes predicted by
the current model or stage will be named ``bboxes``,
``labels``, and ``scores``, the same as the ``InstanceData``
in other places.
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes``, with shape (k, 4),
and ``labels``, with shape (k, ).
gt_instances_ignore (:obj:`InstanceData`, optional): Instances
to be ignored during training. It includes ``bboxes``
attribute data that is ignored during training and testing.
Defaults to None.
Returns:
:obj:`AssignResult`: The assign result.
Example:
>>> from mmengine.structures import InstanceData
>>> self = MaxIoUAssigner(0.5, 0.5)
>>> pred_instances = InstanceData()
>>> pred_instances.priors = torch.Tensor([[0, 0, 10, 10],
... [10, 10, 20, 20]])
>>> gt_instances = InstanceData()
>>> gt_instances.bboxes = torch.Tensor([[0, 0, 10, 9]])
>>> gt_instances.labels = torch.Tensor([0])
>>> assign_result = self.assign(pred_instances, gt_instances)
>>> expected_gt_inds = torch.LongTensor([1, 0])
>>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
"""
gt_bboxes = gt_instances.bboxes
priors = pred_instances.priors
gt_labels = gt_instances.labels
if gt_instances_ignore is not None:
gt_bboxes_ignore = gt_instances_ignore.bboxes
else:
gt_bboxes_ignore = None
assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
gt_bboxes.shape[0] > self.gpu_assign_thr) else False
# compute overlap and assign gt on CPU when number of GT is large
if assign_on_cpu:
device = priors.device
priors = priors.cpu()
gt_bboxes = gt_bboxes.cpu()
gt_labels = gt_labels.cpu()
if gt_bboxes_ignore is not None:
gt_bboxes_ignore = gt_bboxes_ignore.cpu()
overlaps = self.iou_calculator(gt_bboxes, priors)
if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
and gt_bboxes_ignore.numel() > 0 and priors.numel() > 0):
if self.ignore_wrt_candidates:
ignore_overlaps = self.iou_calculator(
priors, gt_bboxes_ignore, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
else:
ignore_overlaps = self.iou_calculator(
gt_bboxes_ignore, priors, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
if assign_on_cpu:
assign_result.gt_inds = assign_result.gt_inds.to(device)
assign_result.max_overlaps = assign_result.max_overlaps.to(device)
if assign_result.labels is not None:
assign_result.labels = assign_result.labels.to(device)
return assign_result
def assign_wrt_overlaps(self, overlaps: Tensor,
gt_labels: Tensor) -> AssignResult:
"""Assign w.r.t. the overlaps of priors with gts.
Args:
overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
shape(k, n).
gt_labels (Tensor): Labels of k gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
# 1. assign -1 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
return AssignResult(
num_gts=num_gts,
gt_inds=assigned_gt_inds,
max_overlaps=max_overlaps,
labels=assigned_labels)
# for each anchor, which gt best overlaps with it
# for each anchor, the max iou of all gts
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# for each gt, which anchor best overlaps with it
# for each gt, the max iou of all proposals
gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
# 2. assign negative: below
# the negative inds are set to be 0
if isinstance(self.neg_iou_thr, float):
assigned_gt_inds[(max_overlaps >= 0)
& (max_overlaps < self.neg_iou_thr)] = 0
elif isinstance(self.neg_iou_thr, tuple):
assert len(self.neg_iou_thr) == 2
assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
& (max_overlaps < self.neg_iou_thr[1])] = 0
# 3. assign positive: above positive IoU threshold
pos_inds = max_overlaps >= self.pos_iou_thr
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
if self.match_low_quality:
# Low-quality matching will overwrite the assigned_gt_inds assigned
# in Step 3. Thus, the assigned gt might not be the best one for
# prediction.
# For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
            # GT bbox 1 will be assigned as the best target for bbox A in
            # step 3. However, if GT bbox 2's gt_argmax_overlaps = A, bbox
            # A's assigned_gt_inds will be overwritten to be GT bbox 2.
# This might be the reason that it is not used in ROI Heads.
for i in range(num_gts):
if gt_max_overlaps[i] >= self.min_pos_iou:
if self.gt_max_assign_all:
max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
assigned_gt_inds[max_iou_inds] = i + 1
else:
assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] -
1]
return AssignResult(
num_gts=num_gts,
gt_inds=assigned_gt_inds,
max_overlaps=max_overlaps,
labels=assigned_labels)
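# Standalone sketch (pure PyTorch, copy into a separate script), not part of
# the original module: the four assignment steps documented above applied to a
# toy overlaps matrix with 2 gts and 4 priors. The thresholds
# (pos=0.5, neg=0.5, min_pos=0.0) and the IoU values are illustrative only.
import torch

overlaps = torch.tensor([[0.90, 0.30, 0.00, 0.45],
                         [0.10, 0.45, 0.20, 0.48]])  # shape (num_gts, num_priors)
pos_iou_thr, neg_iou_thr, min_pos_iou = 0.5, 0.5, 0.0

num_gts, num_priors = overlaps.shape
# step 1: every prior starts unassigned (-1)
assigned = overlaps.new_full((num_priors, ), -1, dtype=torch.long)
max_overlaps, argmax_overlaps = overlaps.max(dim=0)  # best gt for each prior
gt_max_overlaps, _ = overlaps.max(dim=1)             # best prior for each gt
# step 2: priors whose best IoU is below neg_iou_thr become background (0)
assigned[(max_overlaps >= 0) & (max_overlaps < neg_iou_thr)] = 0
# step 3: priors whose best IoU reaches pos_iou_thr get that gt (1-based index)
pos = max_overlaps >= pos_iou_thr
assigned[pos] = argmax_overlaps[pos] + 1
# step 4: each gt claims its best-overlapping prior(s) (low-quality matches)
for i in range(num_gts):
    if gt_max_overlaps[i] >= min_pos_iou:
        assigned[overlaps[i] == gt_max_overlaps[i]] = i + 1
print(assigned)  # tensor([1, 0, 0, 2]); prior 3 is rescued by step 4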

ERD-main/mmdet/models/task_modules/prior_generators/point_generator.py

# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from torch.nn.modules.utils import _pair
from mmdet.registry import TASK_UTILS
DeviceType = Union[str, torch.device]
@TASK_UTILS.register_module()
class PointGenerator:
def _meshgrid(self,
x: Tensor,
y: Tensor,
row_major: bool = True) -> Tuple[Tensor, Tensor]:
"""Generate mesh grid of x and y.
Args:
x (torch.Tensor): Grids of x dimension.
y (torch.Tensor): Grids of y dimension.
            row_major (bool): Whether to return the x grids first, i.e.
                ``(xx, yy)``. Defaults to True.
Returns:
tuple[torch.Tensor]: The mesh grids of x and y.
"""
xx = x.repeat(len(y))
yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
if row_major:
return xx, yy
else:
return yy, xx
def grid_points(self,
featmap_size: Tuple[int, int],
stride=16,
device: DeviceType = 'cuda') -> Tensor:
"""Generate grid points of a single level.
Args:
featmap_size (tuple[int, int]): Size of the feature maps.
stride (int): The stride of corresponding feature map.
device (str | torch.device): The device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: grid point in a feature map.
"""
feat_h, feat_w = featmap_size
shift_x = torch.arange(0., feat_w, device=device) * stride
shift_y = torch.arange(0., feat_h, device=device) * stride
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
stride = shift_x.new_full((shift_xx.shape[0], ), stride)
shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)
all_points = shifts.to(device)
return all_points
def valid_flags(self,
featmap_size: Tuple[int, int],
valid_size: Tuple[int, int],
device: DeviceType = 'cuda') -> Tensor:
"""Generate valid flags of anchors in a feature map.
Args:
            featmap_size (tuple[int, int]): The size of the feature map.
            valid_size (tuple[int, int]): The valid size of the feature map.
device (str | torch.device): Device where the anchors will be
put on.
Return:
torch.Tensor: Valid flags of anchors in a level.
"""
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
return valid
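# Standalone sketch (pure PyTorch, copy into a separate script), not part of
# the original module: what ``grid_points`` above computes for a 2x3 feature
# map with stride 16 -- one (x, y, stride) row per location, enumerated row by
# row. The feature map size and stride are illustrative only.
import torch

feat_h, feat_w, stride = 2, 3, 16
shift_x = torch.arange(0., feat_w) * stride  # x coordinate of each column
shift_y = torch.arange(0., feat_h) * stride  # y coordinate of each row
xx = shift_x.repeat(feat_h)                  # x first (row-major meshgrid)
yy = shift_y.view(-1, 1).repeat(1, feat_w).view(-1)
points = torch.stack([xx, yy, xx.new_full((xx.shape[0], ), stride)], dim=-1)
print(points)
# tensor([[ 0.,  0., 16.], [16.,  0., 16.], [32.,  0., 16.],
#         [ 0., 16., 16.], [16., 16., 16.], [32., 16., 16.]])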
@TASK_UTILS.register_module()
class MlvlPointGenerator:
"""Standard points generator for multi-level (Mlvl) feature maps in 2D
points-based detectors.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels in order (w, h).
offset (float): The offset of points, the value is normalized with
corresponding stride. Defaults to 0.5.
"""
def __init__(self,
strides: Union[List[int], List[Tuple[int, int]]],
offset: float = 0.5) -> None:
self.strides = [_pair(stride) for stride in strides]
self.offset = offset
@property
def num_levels(self) -> int:
"""int: number of feature levels that the generator will be applied"""
return len(self.strides)
@property
def num_base_priors(self) -> List[int]:
"""list[int]: The number of priors (points) at a point
on the feature grid"""
return [1 for _ in range(len(self.strides))]
def _meshgrid(self,
x: Tensor,
y: Tensor,
row_major: bool = True) -> Tuple[Tensor, Tensor]:
yy, xx = torch.meshgrid(y, x)
if row_major:
# warning .flatten() would cause error in ONNX exporting
# have to use reshape here
return xx.reshape(-1), yy.reshape(-1)
else:
return yy.reshape(-1), xx.reshape(-1)
def grid_priors(self,
featmap_sizes: List[Tuple],
dtype: torch.dtype = torch.float32,
device: DeviceType = 'cuda',
with_stride: bool = False) -> List[Tensor]:
"""Generate grid points of multiple feature levels.
Args:
featmap_sizes (list[tuple]): List of feature map sizes in
                multiple feature levels, each size arranged as (h, w).
dtype (:obj:`dtype`): Dtype of priors. Defaults to torch.float32.
device (str | torch.device): The device where the anchors will be
put on.
with_stride (bool): Whether to concatenate the stride to
the last dimension of points.
Return:
list[torch.Tensor]: Points of multiple feature levels.
            The sizes of each tensor should be (N, 2) when ``with_stride`` is
``False``, where N = width * height, width and height
are the sizes of the corresponding feature level,
and the last dimension 2 represent (coord_x, coord_y),
otherwise the shape should be (N, 4),
and the last dimension 4 represent
(coord_x, coord_y, stride_w, stride_h).
"""
assert self.num_levels == len(featmap_sizes)
multi_level_priors = []
for i in range(self.num_levels):
priors = self.single_level_grid_priors(
featmap_sizes[i],
level_idx=i,
dtype=dtype,
device=device,
with_stride=with_stride)
multi_level_priors.append(priors)
return multi_level_priors
def single_level_grid_priors(self,
featmap_size: Tuple[int],
level_idx: int,
dtype: torch.dtype = torch.float32,
device: DeviceType = 'cuda',
with_stride: bool = False) -> Tensor:
"""Generate grid Points of a single level.
Note:
This function is usually called by method ``self.grid_priors``.
Args:
featmap_size (tuple[int]): Size of the feature maps, arrange as
(h, w).
level_idx (int): The index of corresponding feature map level.
dtype (:obj:`dtype`): Dtype of priors. Defaults to torch.float32.
device (str | torch.device): The device the tensor will be put on.
Defaults to 'cuda'.
with_stride (bool): Concatenate the stride to the last dimension
of points.
Return:
Tensor: Points of single feature levels.
            The shape of tensor should be (N, 2) when ``with_stride`` is
``False``, where N = width * height, width and height
are the sizes of the corresponding feature level,
and the last dimension 2 represent (coord_x, coord_y),
otherwise the shape should be (N, 4),
and the last dimension 4 represent
(coord_x, coord_y, stride_w, stride_h).
"""
feat_h, feat_w = featmap_size
stride_w, stride_h = self.strides[level_idx]
shift_x = (torch.arange(0, feat_w, device=device) +
self.offset) * stride_w
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
shift_x = shift_x.to(dtype)
shift_y = (torch.arange(0, feat_h, device=device) +
self.offset) * stride_h
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
shift_y = shift_y.to(dtype)
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
if not with_stride:
shifts = torch.stack([shift_xx, shift_yy], dim=-1)
else:
# use `shape[0]` instead of `len(shift_xx)` for ONNX export
stride_w = shift_xx.new_full((shift_xx.shape[0], ),
stride_w).to(dtype)
stride_h = shift_xx.new_full((shift_yy.shape[0], ),
stride_h).to(dtype)
shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h],
dim=-1)
all_points = shifts.to(device)
return all_points
def valid_flags(self,
featmap_sizes: List[Tuple[int, int]],
pad_shape: Tuple[int],
device: DeviceType = 'cuda') -> List[Tensor]:
"""Generate valid flags of points of multiple feature levels.
Args:
featmap_sizes (list(tuple)): List of feature map sizes in
                multiple feature levels, each size arranged as (h, w).
pad_shape (tuple(int)): The padded shape of the image,
arrange as (h, w).
device (str | torch.device): The device where the anchors will be
put on.
Return:
list(torch.Tensor): Valid flags of points of multiple levels.
"""
assert self.num_levels == len(featmap_sizes)
multi_level_flags = []
for i in range(self.num_levels):
point_stride = self.strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w = pad_shape[:2]
valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h)
valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w)
flags = self.single_level_valid_flags((feat_h, feat_w),
(valid_feat_h, valid_feat_w),
device=device)
multi_level_flags.append(flags)
return multi_level_flags
def single_level_valid_flags(self,
featmap_size: Tuple[int, int],
valid_size: Tuple[int, int],
device: DeviceType = 'cuda') -> Tensor:
"""Generate the valid flags of points of a single feature map.
Args:
            featmap_size (tuple[int]): The size of the feature map, arranged
                as (h, w).
            valid_size (tuple[int]): The valid size of the feature map,
                arranged as (h, w).
device (str | torch.device): The device where the flags will be
put on. Defaults to 'cuda'.
Returns:
            torch.Tensor: The valid flags of each point in a single level \
feature map.
"""
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
return valid
def sparse_priors(self,
prior_idxs: Tensor,
featmap_size: Tuple[int],
level_idx: int,
dtype: torch.dtype = torch.float32,
device: DeviceType = 'cuda') -> Tensor:
"""Generate sparse points according to the ``prior_idxs``.
Args:
prior_idxs (Tensor): The index of corresponding anchors
in the feature map.
            featmap_size (tuple[int]): feature map size arranged as (h, w).
level_idx (int): The level index of corresponding feature
map.
            dtype (:obj:`torch.dtype`): Data type of points. Defaults to
``torch.float32``.
device (str | torch.device): The device where the points is
located.
Returns:
Tensor: Anchor with shape (N, 2), N should be equal to
the length of ``prior_idxs``. And last dimension
2 represent (coord_x, coord_y).
"""
height, width = featmap_size
x = (prior_idxs % width + self.offset) * self.strides[level_idx][0]
y = ((prior_idxs // width) % height +
self.offset) * self.strides[level_idx][1]
prioris = torch.stack([x, y], 1).to(dtype)
prioris = prioris.to(device)
return prioris
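# Usage sketch (standalone; assumes mmdet and its dependencies are installed):
# generate prior points for two feature levels on the CPU. The strides and
# feature map sizes below are illustrative, not tied to any detector config.
from mmdet.models.task_modules.prior_generators import MlvlPointGenerator

generator = MlvlPointGenerator(strides=[8, 16], offset=0.5)
priors = generator.grid_priors([(2, 2), (1, 1)], device='cpu', with_stride=True)
print([p.shape for p in priors])  # [torch.Size([4, 4]), torch.Size([1, 4])]
print(priors[0][0])  # tensor([4., 4., 8., 8.]) -- (x, y, stride_w, stride_h)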

ERD-main/mmdet/models/task_modules/prior_generators/anchor_generator.py

# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from mmengine.utils import is_tuple_of
from torch import Tensor
from torch.nn.modules.utils import _pair
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes
DeviceType = Union[str, torch.device]
@TASK_UTILS.register_module()
class AnchorGenerator:
"""Standard anchor generator for 2D anchor-based detectors.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels in order (w, h).
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
scales (list[int], Optional): Anchor scales for anchors
            in a single level. It cannot be set at the same time as
            `octave_base_scale` and `scales_per_octave`.
base_sizes (list[int], Optional): The basic sizes
of anchors in multiple levels.
If None is given, strides will be used as base_sizes.
(If strides are non square, the shortest stride is taken.)
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. By default it is True in V2.0
octave_base_scale (int, Optional): The base scale of octave.
scales_per_octave (int, Optional): Number of scales for each octave.
`octave_base_scale` and `scales_per_octave` are usually used in
retinanet and the `scales` should be None when they are set.
centers (list[tuple[float]], Optional): The centers of the anchor
relative to the feature grid center in multiple feature levels.
By default it is set to be None and not used. If a list of tuple of
float is given, they will be used to shift the centers of anchors.
center_offset (float): The offset of center in proportion to anchors'
width and height. By default it is 0 in V2.0.
        use_box_type (bool): Whether to wrap anchors with the box type data
structure. Defaults to False.
Examples:
>>> from mmdet.models.task_modules.
... prior_generators import AnchorGenerator
>>> self = AnchorGenerator([16], [1.], [1.], [9])
>>> all_anchors = self.grid_priors([(2, 2)], device='cpu')
>>> print(all_anchors)
[tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
[11.5000, -4.5000, 20.5000, 4.5000],
[-4.5000, 11.5000, 4.5000, 20.5000],
[11.5000, 11.5000, 20.5000, 20.5000]])]
>>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])
>>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu')
>>> print(all_anchors)
[tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
[11.5000, -4.5000, 20.5000, 4.5000],
[-4.5000, 11.5000, 4.5000, 20.5000],
[11.5000, 11.5000, 20.5000, 20.5000]]), \
tensor([[-9., -9., 9., 9.]])]
"""
def __init__(self,
strides: Union[List[int], List[Tuple[int, int]]],
ratios: List[float],
scales: Optional[List[int]] = None,
base_sizes: Optional[List[int]] = None,
scale_major: bool = True,
octave_base_scale: Optional[int] = None,
scales_per_octave: Optional[int] = None,
centers: Optional[List[Tuple[float, float]]] = None,
center_offset: float = 0.,
use_box_type: bool = False) -> None:
# check center and center_offset
if center_offset != 0:
assert centers is None, 'center cannot be set when center_offset' \
f'!=0, {centers} is given.'
if not (0 <= center_offset <= 1):
raise ValueError('center_offset should be in range [0, 1], '
f'{center_offset} is given.')
if centers is not None:
assert len(centers) == len(strides), \
'The number of strides should be the same as centers, got ' \
f'{strides} and {centers}'
# calculate base sizes of anchors
self.strides = [_pair(stride) for stride in strides]
self.base_sizes = [min(stride) for stride in self.strides
] if base_sizes is None else base_sizes
assert len(self.base_sizes) == len(self.strides), \
'The number of strides should be the same as base sizes, got ' \
f'{self.strides} and {self.base_sizes}'
# calculate scales of anchors
assert ((octave_base_scale is not None
and scales_per_octave is not None) ^ (scales is not None)), \
'scales and octave_base_scale with scales_per_octave cannot' \
' be set at the same time'
if scales is not None:
self.scales = torch.Tensor(scales)
elif octave_base_scale is not None and scales_per_octave is not None:
octave_scales = np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
scales = octave_scales * octave_base_scale
self.scales = torch.Tensor(scales)
else:
raise ValueError('Either scales or octave_base_scale with '
'scales_per_octave should be set')
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.ratios = torch.Tensor(ratios)
self.scale_major = scale_major
self.centers = centers
self.center_offset = center_offset
self.base_anchors = self.gen_base_anchors()
self.use_box_type = use_box_type
@property
def num_base_anchors(self) -> List[int]:
"""list[int]: total number of base anchors in a feature grid"""
return self.num_base_priors
@property
def num_base_priors(self) -> List[int]:
"""list[int]: The number of priors (anchors) at a point
on the feature grid"""
return [base_anchors.size(0) for base_anchors in self.base_anchors]
@property
def num_levels(self) -> int:
"""int: number of feature levels that the generator will be applied"""
return len(self.strides)
def gen_base_anchors(self) -> List[Tensor]:
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_size in enumerate(self.base_sizes):
center = None
if self.centers is not None:
center = self.centers[i]
multi_level_base_anchors.append(
self.gen_single_level_base_anchors(
base_size,
scales=self.scales,
ratios=self.ratios,
center=center))
return multi_level_base_anchors
def gen_single_level_base_anchors(self,
base_size: Union[int, float],
scales: Tensor,
ratios: Tensor,
center: Optional[Tuple[float]] = None) \
-> Tensor:
"""Generate base anchors of a single level.
Args:
base_size (int | float): Basic size of an anchor.
scales (torch.Tensor): Scales of the anchor.
ratios (torch.Tensor): The ratio between the height
and width of anchors in a single level.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
torch.Tensor: Anchors in a single-level feature maps.
"""
w = base_size
h = base_size
if center is None:
x_center = self.center_offset * w
y_center = self.center_offset * h
else:
x_center, y_center = center
h_ratios = torch.sqrt(ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
else:
ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchors = [
x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,
y_center + 0.5 * hs
]
base_anchors = torch.stack(base_anchors, dim=-1)
return base_anchors
def _meshgrid(self,
x: Tensor,
y: Tensor,
row_major: bool = True) -> Tuple[Tensor]:
"""Generate mesh grid of x and y.
Args:
x (torch.Tensor): Grids of x dimension.
y (torch.Tensor): Grids of y dimension.
            row_major (bool): Whether to return the x grids first, i.e.
                ``(xx, yy)``. Defaults to True.
Returns:
tuple[torch.Tensor]: The mesh grids of x and y.
"""
# use shape instead of len to keep tracing while exporting to onnx
xx = x.repeat(y.shape[0])
yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
if row_major:
return xx, yy
else:
return yy, xx
def grid_priors(self,
featmap_sizes: List[Tuple],
dtype: torch.dtype = torch.float32,
device: DeviceType = 'cuda') -> List[Tensor]:
"""Generate grid anchors in multiple feature levels.
Args:
featmap_sizes (list[tuple]): List of feature map sizes in
multiple feature levels.
dtype (:obj:`torch.dtype`): Dtype of priors.
Defaults to torch.float32.
device (str | torch.device): The device where the anchors
will be put on.
Return:
list[torch.Tensor]: Anchors in multiple feature levels. \
The sizes of each tensor should be [N, 4], where \
N = width * height * num_base_anchors, width and height \
are the sizes of the corresponding feature level, \
num_base_anchors is the number of anchors for that level.
"""
assert self.num_levels == len(featmap_sizes)
multi_level_anchors = []
for i in range(self.num_levels):
anchors = self.single_level_grid_priors(
featmap_sizes[i], level_idx=i, dtype=dtype, device=device)
multi_level_anchors.append(anchors)
return multi_level_anchors
def single_level_grid_priors(self,
featmap_size: Tuple[int, int],
level_idx: int,
dtype: torch.dtype = torch.float32,
device: DeviceType = 'cuda') -> Tensor:
"""Generate grid anchors of a single level.
Note:
This function is usually called by method ``self.grid_priors``.
Args:
featmap_size (tuple[int, int]): Size of the feature maps.
level_idx (int): The index of corresponding feature map level.
            dtype (:obj:`torch.dtype`): Data type of points. Defaults to
``torch.float32``.
device (str | torch.device): The device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature maps.
"""
base_anchors = self.base_anchors[level_idx].to(device).to(dtype)
feat_h, feat_w = featmap_size
stride_w, stride_h = self.strides[level_idx]
        # First create the range with the default dtype, then convert to
# target `dtype` for onnx exporting.
shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w
shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.view(-1, 4)
# first A rows correspond to A anchors of (0, 0) in feature map,
# then (0, 1), (0, 2), ...
if self.use_box_type:
all_anchors = HorizontalBoxes(all_anchors)
return all_anchors
def sparse_priors(self,
prior_idxs: Tensor,
featmap_size: Tuple[int, int],
level_idx: int,
dtype: torch.dtype = torch.float32,
device: DeviceType = 'cuda') -> Tensor:
"""Generate sparse anchors according to the ``prior_idxs``.
Args:
prior_idxs (Tensor): The index of corresponding anchors
in the feature map.
featmap_size (tuple[int, int]): feature map size arrange as (h, w).
level_idx (int): The level index of corresponding feature
map.
            dtype (:obj:`torch.dtype`): Data type of points. Defaults to
``torch.float32``.
device (str | torch.device): The device where the points is
located.
Returns:
Tensor: Anchor with shape (N, 4), N should be equal to
the length of ``prior_idxs``.
"""
height, width = featmap_size
num_base_anchors = self.num_base_anchors[level_idx]
base_anchor_id = prior_idxs % num_base_anchors
x = (prior_idxs //
num_base_anchors) % width * self.strides[level_idx][0]
y = (prior_idxs // width //
num_base_anchors) % height * self.strides[level_idx][1]
priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \
self.base_anchors[level_idx][base_anchor_id, :].to(device)
return priors
def grid_anchors(self,
featmap_sizes: List[Tuple],
device: DeviceType = 'cuda') -> List[Tensor]:
"""Generate grid anchors in multiple feature levels.
Args:
featmap_sizes (list[tuple]): List of feature map sizes in
multiple feature levels.
device (str | torch.device): Device where the anchors will be
put on.
Return:
list[torch.Tensor]: Anchors in multiple feature levels. \
The sizes of each tensor should be [N, 4], where \
N = width * height * num_base_anchors, width and height \
are the sizes of the corresponding feature level, \
num_base_anchors is the number of anchors for that level.
"""
warnings.warn('``grid_anchors`` would be deprecated soon. '
'Please use ``grid_priors`` ')
assert self.num_levels == len(featmap_sizes)
multi_level_anchors = []
for i in range(self.num_levels):
anchors = self.single_level_grid_anchors(
self.base_anchors[i].to(device),
featmap_sizes[i],
self.strides[i],
device=device)
multi_level_anchors.append(anchors)
return multi_level_anchors
def single_level_grid_anchors(self,
base_anchors: Tensor,
featmap_size: Tuple[int, int],
stride: Tuple[int, int] = (16, 16),
device: DeviceType = 'cuda') -> Tensor:
"""Generate grid anchors of a single level.
Note:
This function is usually called by method ``self.grid_anchors``.
Args:
base_anchors (torch.Tensor): The base anchors of a feature grid.
featmap_size (tuple[int]): Size of the feature maps.
stride (tuple[int, int]): Stride of the feature map in order
(w, h). Defaults to (16, 16).
device (str | torch.device): Device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature maps.
"""
warnings.warn(
'``single_level_grid_anchors`` would be deprecated soon. '
'Please use ``single_level_grid_priors`` ')
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
feat_h, feat_w = featmap_size
shift_x = torch.arange(0, feat_w, device=device) * stride[0]
shift_y = torch.arange(0, feat_h, device=device) * stride[1]
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
shifts = shifts.type_as(base_anchors)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.view(-1, 4)
# first A rows correspond to A anchors of (0, 0) in feature map,
# then (0, 1), (0, 2), ...
return all_anchors
def valid_flags(self,
featmap_sizes: List[Tuple[int, int]],
pad_shape: Tuple,
device: DeviceType = 'cuda') -> List[Tensor]:
"""Generate valid flags of anchors in multiple feature levels.
Args:
featmap_sizes (list(tuple[int, int])): List of feature map sizes in
multiple feature levels.
pad_shape (tuple): The padded shape of the image.
device (str | torch.device): Device where the anchors will be
put on.
Return:
list(torch.Tensor): Valid flags of anchors in multiple levels.
"""
assert self.num_levels == len(featmap_sizes)
multi_level_flags = []
for i in range(self.num_levels):
anchor_stride = self.strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w = pad_shape[:2]
valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)
valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)
flags = self.single_level_valid_flags((feat_h, feat_w),
(valid_feat_h, valid_feat_w),
self.num_base_anchors[i],
device=device)
multi_level_flags.append(flags)
return multi_level_flags
def single_level_valid_flags(self,
featmap_size: Tuple[int, int],
valid_size: Tuple[int, int],
num_base_anchors: int,
device: DeviceType = 'cuda') -> Tensor:
"""Generate the valid flags of anchor in a single feature map.
Args:
featmap_size (tuple[int]): The size of feature maps, arrange
as (h, w).
valid_size (tuple[int]): The valid size of the feature maps.
num_base_anchors (int): The number of base anchors.
device (str | torch.device): Device where the flags will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: The valid flags of each anchor in a single level \
feature map.
"""
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
valid = valid[:, None].expand(valid.size(0),
num_base_anchors).contiguous().view(-1)
return valid
def __repr__(self) -> str:
"""str: a string that describes the module"""
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}strides={self.strides},\n'
repr_str += f'{indent_str}ratios={self.ratios},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
repr_str += f'{indent_str}scale_major={self.scale_major},\n'
repr_str += f'{indent_str}octave_base_scale='
repr_str += f'{self.octave_base_scale},\n'
repr_str += f'{indent_str}scales_per_octave='
repr_str += f'{self.scales_per_octave},\n'
repr_str += f'{indent_str}num_levels={self.num_levels}\n'
repr_str += f'{indent_str}centers={self.centers},\n'
repr_str += f'{indent_str}center_offset={self.center_offset})'
return repr_str
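# Standalone sketch (pure PyTorch, copy into a separate script), not part of
# the original module: how ``gen_single_level_base_anchors`` turns one base
# size plus ratios/scales into (x1, y1, x2, y2) base anchors centered at
# (0, 0), i.e. with center_offset=0. The values below are illustrative only.
import torch

base_size = 8
scales = torch.tensor([1., 2.])
ratios = torch.tensor([0.5, 1., 2.])  # height / width of each anchor shape
h_ratios = torch.sqrt(ratios)
w_ratios = 1 / h_ratios
ws = (base_size * w_ratios[:, None] * scales[None, :]).view(-1)  # scale_major=True layout
hs = (base_size * h_ratios[:, None] * scales[None, :]).view(-1)
base_anchors = torch.stack([-0.5 * ws, -0.5 * hs, 0.5 * ws, 0.5 * hs], dim=-1)
print(base_anchors.shape)  # torch.Size([6, 4]); each anchor keeps area (base_size * scale)**2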
@TASK_UTILS.register_module()
class SSDAnchorGenerator(AnchorGenerator):
"""Anchor generator for SSD.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels.
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
min_sizes (list[float]): The list of minimum anchor sizes on each
level.
max_sizes (list[float]): The list of maximum anchor sizes on each
level.
        basesize_ratio_range (tuple(float)): Ratio range of anchors. Used
            when min_sizes and max_sizes are not set.
        input_size (int): Size of the input image, 300 for SSD300, 512 for
            SSD512. Used when min_sizes and max_sizes are not set.
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. It is always set to be False in SSD.
        use_box_type (bool): Whether to wrap anchors with the box type data
structure. Defaults to False.
"""
def __init__(self,
strides: Union[List[int], List[Tuple[int, int]]],
ratios: List[float],
min_sizes: Optional[List[float]] = None,
max_sizes: Optional[List[float]] = None,
basesize_ratio_range: Tuple[float] = (0.15, 0.9),
input_size: int = 300,
scale_major: bool = True,
use_box_type: bool = False) -> None:
assert len(strides) == len(ratios)
assert not (min_sizes is None) ^ (max_sizes is None)
self.strides = [_pair(stride) for stride in strides]
self.centers = [(stride[0] / 2., stride[1] / 2.)
for stride in self.strides]
if min_sizes is None and max_sizes is None:
# use hard code to generate SSD anchors
self.input_size = input_size
assert is_tuple_of(basesize_ratio_range, float)
self.basesize_ratio_range = basesize_ratio_range
# calculate anchor ratios and sizes
min_ratio, max_ratio = basesize_ratio_range
min_ratio = int(min_ratio * 100)
max_ratio = int(max_ratio * 100)
step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
min_sizes = []
max_sizes = []
for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
min_sizes.append(int(self.input_size * ratio / 100))
max_sizes.append(int(self.input_size * (ratio + step) / 100))
if self.input_size == 300:
if basesize_ratio_range[0] == 0.15: # SSD300 COCO
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
min_sizes.insert(0, int(self.input_size * 10 / 100))
max_sizes.insert(0, int(self.input_size * 20 / 100))
else:
raise ValueError(
'basesize_ratio_range[0] should be either 0.15'
'or 0.2 when input_size is 300, got '
f'{basesize_ratio_range[0]}.')
elif self.input_size == 512:
if basesize_ratio_range[0] == 0.1: # SSD512 COCO
min_sizes.insert(0, int(self.input_size * 4 / 100))
max_sizes.insert(0, int(self.input_size * 10 / 100))
elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
else:
raise ValueError(
'When not setting min_sizes and max_sizes,'
'basesize_ratio_range[0] should be either 0.1'
'or 0.15 when input_size is 512, got'
f' {basesize_ratio_range[0]}.')
else:
raise ValueError(
'Only support 300 or 512 in SSDAnchorGenerator when '
'not setting min_sizes and max_sizes, '
f'got {self.input_size}.')
assert len(min_sizes) == len(max_sizes) == len(strides)
anchor_ratios = []
anchor_scales = []
for k in range(len(self.strides)):
scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
anchor_ratio = [1.]
for r in ratios[k]:
anchor_ratio += [1 / r, r] # 4 or 6 ratio
anchor_ratios.append(torch.Tensor(anchor_ratio))
anchor_scales.append(torch.Tensor(scales))
self.base_sizes = min_sizes
self.scales = anchor_scales
self.ratios = anchor_ratios
self.scale_major = scale_major
self.center_offset = 0
self.base_anchors = self.gen_base_anchors()
self.use_box_type = use_box_type
def gen_base_anchors(self) -> List[Tensor]:
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_size in enumerate(self.base_sizes):
base_anchors = self.gen_single_level_base_anchors(
base_size,
scales=self.scales[i],
ratios=self.ratios[i],
center=self.centers[i])
indices = list(range(len(self.ratios[i])))
indices.insert(1, len(indices))
base_anchors = torch.index_select(base_anchors, 0,
torch.LongTensor(indices))
multi_level_base_anchors.append(base_anchors)
return multi_level_base_anchors
def __repr__(self) -> str:
"""str: a string that describes the module"""
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}strides={self.strides},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}scale_major={self.scale_major},\n'
repr_str += f'{indent_str}input_size={self.input_size},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}ratios={self.ratios},\n'
repr_str += f'{indent_str}num_levels={self.num_levels},\n'
repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
repr_str += f'{indent_str}basesize_ratio_range='
repr_str += f'{self.basesize_ratio_range})'
return repr_str
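# Standalone sketch (pure Python/NumPy, copy into a separate script), not part
# of the original module: the arithmetic of the SSD300-COCO branch above,
# assuming the usual 6 feature levels and basesize_ratio_range=(0.15, 0.9).
import numpy as np

input_size, num_levels = 300, 6
min_ratio, max_ratio = int(0.15 * 100), int(0.9 * 100)            # 15, 90
step = int(np.floor(max_ratio - min_ratio) / (num_levels - 2))    # 18
ratios = list(range(min_ratio, max_ratio + 1, step))              # [15, 33, 51, 69, 87]
min_sizes = [int(input_size * r / 100) for r in ratios]
max_sizes = [int(input_size * (r + step) / 100) for r in ratios]
min_sizes.insert(0, int(input_size * 7 / 100))    # extra smallest level for COCO
max_sizes.insert(0, int(input_size * 15 / 100))
print(min_sizes)  # [21, 45, 99, 153, 207, 261]
print(max_sizes)  # [45, 99, 153, 207, 261, 315]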
@TASK_UTILS.register_module()
class LegacyAnchorGenerator(AnchorGenerator):
"""Legacy anchor generator used in MMDetection V1.x.
Note:
Difference to the V2.0 anchor generator:
1. The center offset of V1.x anchors are set to be 0.5 rather than 0.
        2. The width/height are decreased by 1 when calculating the anchors' \
centers and corners to meet the V1.x coordinate system.
3. The anchors' corners are quantized.
Args:
strides (list[int] | list[tuple[int]]): Strides of anchors
in multiple feature levels.
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
scales (list[int] | None): Anchor scales for anchors in a single level.
            It cannot be set at the same time as `octave_base_scale` and
            `scales_per_octave`.
base_sizes (list[int]): The basic sizes of anchors in multiple levels.
If None is given, strides will be used to generate base_sizes.
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. By default it is True in V2.0
octave_base_scale (int): The base scale of octave.
scales_per_octave (int): Number of scales for each octave.
`octave_base_scale` and `scales_per_octave` are usually used in
retinanet and the `scales` should be None when they are set.
centers (list[tuple[float, float]] | None): The centers of the anchor
relative to the feature grid center in multiple feature levels.
            By default it is set to be None and not used. If a list of tuples
            of float is given, they will be used to shift the centers of anchors.
center_offset (float): The offset of center in proportion to anchors'
            width and height. By default it is 0 in V2.0 but it should be 0.5
            in v1.x models.
        use_box_type (bool): Whether to wrap anchors with the box type data
structure. Defaults to False.
Examples:
>>> from mmdet.models.task_modules.
... prior_generators import LegacyAnchorGenerator
>>> self = LegacyAnchorGenerator(
>>> [16], [1.], [1.], [9], center_offset=0.5)
>>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')
>>> print(all_anchors)
[tensor([[ 0., 0., 8., 8.],
[16., 0., 24., 8.],
[ 0., 16., 8., 24.],
[16., 16., 24., 24.]])]
"""
def gen_single_level_base_anchors(self,
base_size: Union[int, float],
scales: Tensor,
ratios: Tensor,
center: Optional[Tuple[float]] = None) \
-> Tensor:
"""Generate base anchors of a single level.
Note:
            The width/height of anchors are decreased by 1 when calculating \
the centers and corners to meet the V1.x coordinate system.
Args:
base_size (int | float): Basic size of an anchor.
scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between the height
                and width of anchors in a single level.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
torch.Tensor: Anchors in a single-level feature map.
"""
w = base_size
h = base_size
if center is None:
x_center = self.center_offset * (w - 1)
y_center = self.center_offset * (h - 1)
else:
x_center, y_center = center
h_ratios = torch.sqrt(ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
else:
ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchors = [
x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1),
x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1)
]
base_anchors = torch.stack(base_anchors, dim=-1).round()
return base_anchors
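# Standalone sketch (pure PyTorch, copy into a separate script), not part of
# the original module: the legacy (V1.x) branch above versus the V2.0 one for
# a single base anchor with base_size=9, ratio=1, scale=1 and center_offset=0.5.
# The legacy branch subtracts 1 from w/h before centering and rounds corners.
import torch

w = h = torch.tensor(9.)
cx_v2, cy_v2 = 0.5 * w, 0.5 * h                        # V2.0 center
v2 = torch.stack([cx_v2 - 0.5 * w, cy_v2 - 0.5 * h,
                  cx_v2 + 0.5 * w, cy_v2 + 0.5 * h])
cx_v1, cy_v1 = 0.5 * (w - 1), 0.5 * (h - 1)            # legacy center
v1 = torch.stack([cx_v1 - 0.5 * (w - 1), cy_v1 - 0.5 * (h - 1),
                  cx_v1 + 0.5 * (w - 1), cy_v1 + 0.5 * (h - 1)]).round()
print(v2, v1)  # tensor([0., 0., 9., 9.]) tensor([0., 0., 8., 8.])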
@TASK_UTILS.register_module()
class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
"""Legacy anchor generator used in MMDetection V1.x.
The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`
can be found in `LegacyAnchorGenerator`.
"""
def __init__(self,
strides: Union[List[int], List[Tuple[int, int]]],
ratios: List[float],
basesize_ratio_range: Tuple[float],
input_size: int = 300,
scale_major: bool = True,
use_box_type: bool = False) -> None:
super(LegacySSDAnchorGenerator, self).__init__(
strides=strides,
ratios=ratios,
basesize_ratio_range=basesize_ratio_range,
input_size=input_size,
scale_major=scale_major,
use_box_type=use_box_type)
self.centers = [((stride - 1) / 2., (stride - 1) / 2.)
for stride in strides]
self.base_anchors = self.gen_base_anchors()
@TASK_UTILS.register_module()
class YOLOAnchorGenerator(AnchorGenerator):
"""Anchor generator for YOLO.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels.
base_sizes (list[list[tuple[int, int]]]): The basic sizes
of anchors in multiple levels.
"""
def __init__(self,
strides: Union[List[int], List[Tuple[int, int]]],
base_sizes: List[List[Tuple[int, int]]],
use_box_type: bool = False) -> None:
self.strides = [_pair(stride) for stride in strides]
self.centers = [(stride[0] / 2., stride[1] / 2.)
for stride in self.strides]
self.base_sizes = []
num_anchor_per_level = len(base_sizes[0])
for base_sizes_per_level in base_sizes:
assert num_anchor_per_level == len(base_sizes_per_level)
self.base_sizes.append(
[_pair(base_size) for base_size in base_sizes_per_level])
self.base_anchors = self.gen_base_anchors()
self.use_box_type = use_box_type
@property
def num_levels(self) -> int:
"""int: number of feature levels that the generator will be applied"""
return len(self.base_sizes)
def gen_base_anchors(self) -> List[Tensor]:
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_sizes_per_level in enumerate(self.base_sizes):
center = None
if self.centers is not None:
center = self.centers[i]
multi_level_base_anchors.append(
self.gen_single_level_base_anchors(base_sizes_per_level,
center))
return multi_level_base_anchors
def gen_single_level_base_anchors(self,
base_sizes_per_level: List[Tuple[int]],
center: Optional[Tuple[float]] = None) \
-> Tensor:
"""Generate base anchors of a single level.
Args:
base_sizes_per_level (list[tuple[int]]): Basic sizes of
anchors.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
torch.Tensor: Anchors in a single-level feature maps.
"""
x_center, y_center = center
base_anchors = []
for base_size in base_sizes_per_level:
w, h = base_size
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchor = torch.Tensor([
x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,
y_center + 0.5 * h
])
base_anchors.append(base_anchor)
base_anchors = torch.stack(base_anchors, dim=0)
return base_anchors
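# Usage sketch (standalone; assumes mmdet is installed): YOLO base anchors are
# taken directly from ``base_sizes`` and centered on each grid cell, so three
# (w, h) pairs per level give three base anchors per location. The sizes below
# are the common YOLOv3 anchors for stride 32, used here only as an illustration.
from mmdet.models.task_modules.prior_generators import YOLOAnchorGenerator

generator = YOLOAnchorGenerator(
    strides=[32], base_sizes=[[(116, 90), (156, 198), (373, 326)]])
print(generator.num_base_priors)        # [3]
print(generator.base_anchors[0].shape)  # torch.Size([3, 4])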

ERD-main/mmdet/models/task_modules/prior_generators/utils.py

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple
import torch
from torch import Tensor
from mmdet.structures.bbox import BaseBoxes
def anchor_inside_flags(flat_anchors: Tensor,
valid_flags: Tensor,
img_shape: Tuple[int],
allowed_border: int = 0) -> Tensor:
"""Check whether the anchors are inside the border.
Args:
flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
valid_flags (torch.Tensor): An existing valid flags of anchors.
img_shape (tuple(int)): Shape of current image.
allowed_border (int): The border to allow the valid anchor.
Defaults to 0.
Returns:
torch.Tensor: Flags indicating whether the anchors are inside a \
valid range.
"""
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
if isinstance(flat_anchors, BaseBoxes):
inside_flags = valid_flags & \
flat_anchors.is_inside([img_h, img_w],
all_inside=True,
allowed_border=allowed_border)
else:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border) & \
(flat_anchors[:, 1] >= -allowed_border) & \
(flat_anchors[:, 2] < img_w + allowed_border) & \
(flat_anchors[:, 3] < img_h + allowed_border)
else:
inside_flags = valid_flags
return inside_flags
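# Usage sketch (standalone; assumes mmdet is installed) for the plain-tensor
# branch above: the second anchor sticks out of a 20x20 image, so it is
# rejected with allowed_border=0 but accepted with a 5-pixel border.
import torch
from mmdet.models.task_modules.prior_generators import anchor_inside_flags

anchors = torch.tensor([[2., 2., 10., 10.], [15., 15., 24., 24.]])
valid = torch.ones(2, dtype=torch.bool)
print(anchor_inside_flags(anchors, valid, (20, 20), allowed_border=0))
# tensor([ True, False])
print(anchor_inside_flags(anchors, valid, (20, 20), allowed_border=5))
# tensor([True, True])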
def calc_region(bbox: Tensor,
ratio: float,
featmap_size: Optional[Tuple] = None) -> Tuple[int]:
"""Calculate a proportional bbox region.
    The bbox center is fixed; with a ratio ``r`` the new width and height
    become (1 - 2r) * w and (1 - 2r) * h.
Args:
bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
ratio (float): Ratio of the output region.
featmap_size (tuple, Optional): Feature map size in (height, width)
order used for clipping the boundary. Defaults to None.
Returns:
tuple: x1, y1, x2, y2
"""
x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1])
y1 = y1.clamp(min=0, max=featmap_size[0])
x2 = x2.clamp(min=0, max=featmap_size[1])
y2 = y2.clamp(min=0, max=featmap_size[0])
return (x1, y1, x2, y2)
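# Usage sketch (standalone; assumes mmdet is installed): with ratio=0.25 the
# region keeps the central half of the box along each axis and is then clipped
# to a 32x32 feature map. The box coordinates are illustrative only.
import torch
from mmdet.models.task_modules.prior_generators import calc_region

bbox = torch.tensor([8., 8., 40., 24.])
print(calc_region(bbox, ratio=0.25, featmap_size=(32, 32)))
# (tensor(16), tensor(12), tensor(32), tensor(20))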

ERD-main/mmdet/models/task_modules/prior_generators/__init__.py

# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
SSDAnchorGenerator, YOLOAnchorGenerator)
from .point_generator import MlvlPointGenerator, PointGenerator
from .utils import anchor_inside_flags, calc_region
__all__ = [
'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',
'PointGenerator', 'calc_region', 'YOLOAnchorGenerator',
'MlvlPointGenerator', 'SSDAnchorGenerator'
]

ERD-main/mmdet/models/task_modules/samplers/multi_instance_sampling_result.py

# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch import Tensor
from ..assigners import AssignResult
from .sampling_result import SamplingResult
class MultiInstanceSamplingResult(SamplingResult):
"""Bbox sampling result. Further encapsulation of SamplingResult. Three
attributes neg_assigned_gt_inds, neg_gt_labels, and neg_gt_bboxes have been
    added on top of SamplingResult.
Args:
pos_inds (Tensor): Indices of positive samples.
neg_inds (Tensor): Indices of negative samples.
priors (Tensor): The priors can be anchors or points,
or the bboxes predicted by the previous stage.
gt_and_ignore_bboxes (Tensor): Ground truth and ignore bboxes.
assign_result (:obj:`AssignResult`): Assigning results.
gt_flags (Tensor): The Ground truth flags.
avg_factor_with_neg (bool): If True, ``avg_factor`` equal to
the number of total priors; Otherwise, it is the number of
positive priors. Defaults to True.
"""
def __init__(self,
pos_inds: Tensor,
neg_inds: Tensor,
priors: Tensor,
gt_and_ignore_bboxes: Tensor,
assign_result: AssignResult,
gt_flags: Tensor,
avg_factor_with_neg: bool = True) -> None:
self.neg_assigned_gt_inds = assign_result.gt_inds[neg_inds]
self.neg_gt_labels = assign_result.labels[neg_inds]
if gt_and_ignore_bboxes.numel() == 0:
self.neg_gt_bboxes = torch.empty_like(gt_and_ignore_bboxes).view(
-1, 4)
else:
if len(gt_and_ignore_bboxes.shape) < 2:
gt_and_ignore_bboxes = gt_and_ignore_bboxes.view(-1, 4)
self.neg_gt_bboxes = gt_and_ignore_bboxes[
self.neg_assigned_gt_inds.long(), :]
# To resist the minus 1 operation in `SamplingResult.init()`.
assign_result.gt_inds += 1
super().__init__(
pos_inds=pos_inds,
neg_inds=neg_inds,
priors=priors,
gt_bboxes=gt_and_ignore_bboxes,
assign_result=assign_result,
gt_flags=gt_flags,
avg_factor_with_neg=avg_factor_with_neg)

ERD-main/mmdet/models/task_modules/samplers/instance_balanced_pos_sampler.py

# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.registry import TASK_UTILS
from .random_sampler import RandomSampler
@TASK_UTILS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
"""Instance balanced sampler that samples equal number of positive samples
for each instance."""
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): The assigned results of boxes.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
num_gts = len(unique_gt_inds)
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
sampled_inds = []
for i in unique_gt_inds:
inds = torch.nonzero(
assign_result.gt_inds == i.item(), as_tuple=False)
if inds.numel() != 0:
inds = inds.squeeze(1)
else:
continue
if len(inds) > num_per_gt:
inds = self.random_choice(inds, num_per_gt)
sampled_inds.append(inds)
sampled_inds = torch.cat(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(
list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
extra_inds = torch.from_numpy(extra_inds).to(
assign_result.gt_inds.device).long()
sampled_inds = torch.cat([sampled_inds, extra_inds])
elif len(sampled_inds) > num_expected:
sampled_inds = self.random_choice(sampled_inds, num_expected)
return sampled_inds
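# Standalone sketch (pure PyTorch, copy into a separate script), not part of
# the original module: the balancing idea above -- group positive indices by
# their assigned gt and draw roughly num_expected / num_gts samples from each
# group instead of sampling uniformly over all positives. The toy assignment
# is illustrative; the real sampler also tops up or subsamples via
# ``random_choice``.
import torch

gt_inds = torch.tensor([1, 1, 1, 1, 2, 2, 0, 0])  # 0 = negative, >0 = assigned gt
num_expected = 4
pos_inds = torch.nonzero(gt_inds > 0, as_tuple=False).squeeze(1)
unique_gt_inds = gt_inds[pos_inds].unique()
num_per_gt = int(round(num_expected / float(len(unique_gt_inds))) + 1)
sampled = []
for g in unique_gt_inds:
    inds = torch.nonzero(gt_inds == g, as_tuple=False).squeeze(1)
    inds = inds[torch.randperm(inds.numel())][:num_per_gt]  # random subset per gt
    sampled.append(inds)
sampled = torch.cat(sampled)[:num_expected]  # crude trim of the overshoot
print(sampled.numel())  # 4, drawn from both instances rather than gt 1 alone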

ERD-main/mmdet/models/task_modules/samplers/combined_sampler.py

# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from .base_sampler import BaseSampler
@TASK_UTILS.register_module()
class CombinedSampler(BaseSampler):
"""A sampler that combines positive sampler and negative sampler."""
def __init__(self, pos_sampler, neg_sampler, **kwargs):
super(CombinedSampler, self).__init__(**kwargs)
self.pos_sampler = TASK_UTILS.build(pos_sampler, default_args=kwargs)
self.neg_sampler = TASK_UTILS.build(neg_sampler, default_args=kwargs)
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError

ERD-main/mmdet/models/task_modules/samplers/mask_sampling_result.py

# Copyright (c) OpenMMLab. All rights reserved.
"""copy from
https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py."""
import torch
from torch import Tensor
from ..assigners import AssignResult
from .sampling_result import SamplingResult
class MaskSamplingResult(SamplingResult):
"""Mask sampling result."""
def __init__(self,
pos_inds: Tensor,
neg_inds: Tensor,
masks: Tensor,
gt_masks: Tensor,
assign_result: AssignResult,
gt_flags: Tensor,
avg_factor_with_neg: bool = True) -> None:
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.num_pos = max(pos_inds.numel(), 1)
self.num_neg = max(neg_inds.numel(), 1)
self.avg_factor = self.num_pos + self.num_neg \
if avg_factor_with_neg else self.num_pos
self.pos_masks = masks[pos_inds]
self.neg_masks = masks[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_masks.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
if gt_masks.numel() == 0:
# hack for index error case
assert self.pos_assigned_gt_inds.numel() == 0
self.pos_gt_masks = torch.empty_like(gt_masks)
else:
self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :]
@property
def masks(self) -> Tensor:
"""torch.Tensor: concatenated positive and negative masks."""
return torch.cat([self.pos_masks, self.neg_masks])
def __nice__(self) -> str:
data = self.info.copy()
data['pos_masks'] = data.pop('pos_masks').shape
data['neg_masks'] = data.pop('neg_masks').shape
parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
body = ' ' + ',\n '.join(parts)
return '{\n' + body + '\n}'
@property
def info(self) -> dict:
"""Returns a dictionary of info about the object."""
return {
'pos_inds': self.pos_inds,
'neg_inds': self.neg_inds,
'pos_masks': self.pos_masks,
'neg_masks': self.neg_masks,
'pos_is_gt': self.pos_is_gt,
'num_gts': self.num_gts,
'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
}

ERD-main/mmdet/models/task_modules/samplers/multi_instance_random_sampler.py

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from mmengine.structures import InstanceData
from numpy import ndarray
from torch import Tensor
from mmdet.registry import TASK_UTILS
from ..assigners import AssignResult
from .multi_instance_sampling_result import MultiInstanceSamplingResult
from .random_sampler import RandomSampler
@TASK_UTILS.register_module()
class MultiInsRandomSampler(RandomSampler):
"""Random sampler for multi instance.
Note:
        Multi-instance means to predict multiple detection boxes with
        one proposal box. `AssignResult` may assign multiple gt boxes
        to each proposal box; in this case, `RandomSampler` should be
        replaced by `MultiInsRandomSampler`.
"""
def _sample_pos(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some positive samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(
assign_result.labels[:, 0] > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some negative samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
            num_expected (int): The number of expected negative samples
Returns:
Tensor or ndarray: sampled indices.
"""
neg_inds = torch.nonzero(
assign_result.labels[:, 0] == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData,
**kwargs) -> MultiInstanceSamplingResult:
"""Sample positive and negative bboxes.
Args:
assign_result (:obj:`AssignResult`): Assigning results from
MultiInstanceAssigner.
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors or points, or the bboxes predicted by the
previous stage, has shape (n, 4). The bboxes predicted by
the current model or stage will be named ``bboxes``,
``labels``, and ``scores``, the same as the ``InstanceData``
in other places.
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes``, with shape (k, 4),
and ``labels``, with shape (k, ).
Returns:
:obj:`MultiInstanceSamplingResult`: Sampling result.
"""
assert 'batch_gt_instances_ignore' in kwargs, \
'batch_gt_instances_ignore is necessary for MultiInsRandomSampler'
gt_bboxes = gt_instances.bboxes
ignore_bboxes = kwargs['batch_gt_instances_ignore'].bboxes
gt_and_ignore_bboxes = torch.cat([gt_bboxes, ignore_bboxes], dim=0)
priors = pred_instances.priors
if len(priors.shape) < 2:
priors = priors[None, :]
priors = priors[:, :4]
gt_flags = priors.new_zeros((priors.shape[0], ), dtype=torch.uint8)
priors = torch.cat([priors, gt_and_ignore_bboxes], dim=0)
gt_ones = priors.new_ones(
gt_and_ignore_bboxes.shape[0], dtype=torch.uint8)
gt_flags = torch.cat([gt_flags, gt_ones])
num_expected_pos = int(self.num * self.pos_fraction)
pos_inds = self.pos_sampler._sample_pos(assign_result,
num_expected_pos)
# We found that sampled indices have duplicated items occasionally.
# (may be a bug of PyTorch)
pos_inds = pos_inds.unique()
num_sampled_pos = pos_inds.numel()
num_expected_neg = self.num - num_sampled_pos
if self.neg_pos_ub >= 0:
_pos = max(1, num_sampled_pos)
neg_upper_bound = int(self.neg_pos_ub * _pos)
if num_expected_neg > neg_upper_bound:
num_expected_neg = neg_upper_bound
neg_inds = self.neg_sampler._sample_neg(assign_result,
num_expected_neg)
neg_inds = neg_inds.unique()
sampling_result = MultiInstanceSamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
priors=priors,
gt_and_ignore_bboxes=gt_and_ignore_bboxes,
assign_result=assign_result,
gt_flags=gt_flags)
return sampling_result
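# Illustrative sketch: the key difference from RandomSampler is that
# positives and negatives are read from the first column of the
# (n, num_instances) label matrix produced by MultiInstanceAssigner.
# A tiny, hedged standalone illustration; the label values are made up.
if __name__ == '__main__':
    _labels = torch.tensor([[2, 0], [0, 0], [1, 3], [0, 1]])
    _pos_inds = torch.nonzero(_labels[:, 0] > 0, as_tuple=False).squeeze(1)
    _neg_inds = torch.nonzero(_labels[:, 0] == 0, as_tuple=False).squeeze(1)
    print(_pos_inds)  # tensor([0, 2])
    print(_neg_inds)  # tensor([1, 3])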
| 5,250 | 39.083969 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/samplers/base_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch
from mmengine.structures import InstanceData
from mmdet.structures.bbox import BaseBoxes, cat_boxes
from ..assigners import AssignResult
from .sampling_result import SamplingResult
class BaseSampler(metaclass=ABCMeta):
"""Base class of samplers.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int): Upper bound number of negative and
positive samples. Defaults to -1.
add_gt_as_proposals (bool): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num: int,
pos_fraction: float,
neg_pos_ub: int = -1,
add_gt_as_proposals: bool = True,
**kwargs) -> None:
self.num = num
self.pos_fraction = pos_fraction
self.neg_pos_ub = neg_pos_ub
self.add_gt_as_proposals = add_gt_as_proposals
self.pos_sampler = self
self.neg_sampler = self
@abstractmethod
def _sample_pos(self, assign_result: AssignResult, num_expected: int,
**kwargs):
"""Sample positive samples."""
pass
@abstractmethod
def _sample_neg(self, assign_result: AssignResult, num_expected: int,
**kwargs):
"""Sample negative samples."""
pass
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData, **kwargs) -> SamplingResult:
"""Sample positive and negative bboxes.
This is a simple implementation of bbox sampling given candidates,
assigning results and ground truth bboxes.
Args:
assign_result (:obj:`AssignResult`): Assigning results.
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors or points, or the bboxes predicted by the
previous stage, has shape (n, 4). The bboxes predicted by
the current model or stage will be named ``bboxes``,
``labels``, and ``scores``, the same as the ``InstanceData``
in other places.
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes``, with shape (k, 4),
and ``labels``, with shape (k, ).
Returns:
:obj:`SamplingResult`: Sampling result.
Example:
>>> from mmengine.structures import InstanceData
            >>> from mmdet.models.task_modules.samplers import RandomSampler
            >>> from mmdet.models.task_modules.assigners import AssignResult
            >>> from mmdet.models.task_modules.samplers.sampling_result import (
            ...     ensure_rng, random_boxes)
>>> rng = ensure_rng(None)
>>> assign_result = AssignResult.random(rng=rng)
>>> pred_instances = InstanceData()
>>> pred_instances.priors = random_boxes(assign_result.num_preds,
... rng=rng)
>>> gt_instances = InstanceData()
>>> gt_instances.bboxes = random_boxes(assign_result.num_gts,
... rng=rng)
>>> gt_instances.labels = torch.randint(
... 0, 5, (assign_result.num_gts,), dtype=torch.long)
>>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,
>>> add_gt_as_proposals=False)
>>> self = self.sample(assign_result, pred_instances, gt_instances)
"""
gt_bboxes = gt_instances.bboxes
priors = pred_instances.priors
gt_labels = gt_instances.labels
if len(priors.shape) < 2:
priors = priors[None, :]
gt_flags = priors.new_zeros((priors.shape[0], ), dtype=torch.uint8)
if self.add_gt_as_proposals and len(gt_bboxes) > 0:
# When `gt_bboxes` and `priors` are all box type, convert
# `gt_bboxes` type to `priors` type.
if (isinstance(gt_bboxes, BaseBoxes)
and isinstance(priors, BaseBoxes)):
gt_bboxes_ = gt_bboxes.convert_to(type(priors))
else:
gt_bboxes_ = gt_bboxes
priors = cat_boxes([gt_bboxes_, priors], dim=0)
assign_result.add_gt_(gt_labels)
gt_ones = priors.new_ones(gt_bboxes_.shape[0], dtype=torch.uint8)
gt_flags = torch.cat([gt_ones, gt_flags])
num_expected_pos = int(self.num * self.pos_fraction)
pos_inds = self.pos_sampler._sample_pos(
assign_result, num_expected_pos, bboxes=priors, **kwargs)
# We found that sampled indices have duplicated items occasionally.
# (may be a bug of PyTorch)
pos_inds = pos_inds.unique()
num_sampled_pos = pos_inds.numel()
num_expected_neg = self.num - num_sampled_pos
if self.neg_pos_ub >= 0:
_pos = max(1, num_sampled_pos)
neg_upper_bound = int(self.neg_pos_ub * _pos)
if num_expected_neg > neg_upper_bound:
num_expected_neg = neg_upper_bound
neg_inds = self.neg_sampler._sample_neg(
assign_result, num_expected_neg, bboxes=priors, **kwargs)
neg_inds = neg_inds.unique()
sampling_result = SamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
priors=priors,
gt_bboxes=gt_bboxes,
assign_result=assign_result,
gt_flags=gt_flags)
return sampling_result
| 5,805 | 41.379562 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/samplers/random_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from numpy import ndarray
from torch import Tensor
from mmdet.registry import TASK_UTILS
from ..assigners import AssignResult
from .base_sampler import BaseSampler
@TASK_UTILS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int): Upper bound number of negative and
positive samples. Defaults to -1.
add_gt_as_proposals (bool): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num: int,
pos_fraction: float,
neg_pos_ub: int = -1,
add_gt_as_proposals: bool = True,
**kwargs):
from .sampling_result import ensure_rng
super().__init__(
num=num,
pos_fraction=pos_fraction,
neg_pos_ub=neg_pos_ub,
add_gt_as_proposals=add_gt_as_proposals)
self.rng = ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery: Union[Tensor, ndarray, list],
num: int) -> Union[Tensor, ndarray]:
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some positive samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some negative samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
            num_expected (int): The number of expected negative samples
Returns:
Tensor or ndarray: sampled indices.
"""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
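# Illustrative sketch: the core of `random_choice` isolated -- permute the
# candidate indices and keep the first `num`. A hedged standalone demo;
# the numbers below are arbitrary.
if __name__ == '__main__':
    _gallery = torch.arange(20)          # candidate indices
    _num = 4
    _perm = torch.randperm(_gallery.numel())[:_num]
    _picked = _gallery[_perm]            # 4 distinct indices from 0..19
    print(_picked)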
| 3,902 | 34.481818 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/samplers/ohem_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox2roi
from .base_sampler import BaseSampler
@TASK_UTILS.register_module()
class OHEMSampler(BaseSampler):
r"""Online Hard Example Mining Sampler described in `Training Region-based
Object Detectors with Online Hard Example Mining
<https://arxiv.org/abs/1604.03540>`_.
"""
def __init__(self,
num,
pos_fraction,
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
loss_key='loss_cls',
**kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.context = context
if not hasattr(self.context, 'num_stages'):
self.bbox_head = self.context.bbox_head
else:
self.bbox_head = self.context.bbox_head[self.context.current_stage]
self.loss_key = loss_key
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
with torch.no_grad():
rois = bbox2roi([bboxes])
if not hasattr(self.context, 'num_stages'):
bbox_results = self.context._bbox_forward(feats, rois)
else:
bbox_results = self.context._bbox_forward(
self.context.current_stage, feats, rois)
cls_score = bbox_results['cls_score']
loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=rois,
labels=labels,
label_weights=cls_score.new_ones(cls_score.size(0)),
bbox_targets=None,
bbox_weights=None,
reduction_override='none')[self.loss_key]
_, topk_loss_inds = loss.topk(num_expected)
return inds[topk_loss_inds]
def _sample_pos(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected positive samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of positive samples
"""
# Sample some hard positive samples
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
assign_result.labels[pos_inds], feats)
def _sample_neg(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample negative boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected negative samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of negative samples
"""
# Sample some hard negative samples
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
neg_labels = assign_result.labels.new_empty(
neg_inds.size(0)).fill_(self.bbox_head.num_classes)
return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
neg_labels, feats)
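# Illustrative usage sketch: OHEMSampler needs the RoI head (`context`) to
# re-run the bbox head and rank candidates by classification loss, so it is
# normally selected through the training config rather than instantiated by
# hand. A hedged, config-style example; the field values are illustrative.
#
# train_cfg = dict(
#     rcnn=dict(
#         sampler=dict(
#             type='OHEMSampler',
#             num=512,
#             pos_fraction=0.25,
#             neg_pos_ub=-1,
#             add_gt_as_proposals=True)))
# # The RoI head typically passes itself as `context` when building the
# # sampler, which is how `hard_mining` gets access to `_bbox_forward`.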
| 4,229 | 36.767857 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/samplers/iou_balanced_neg_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.registry import TASK_UTILS
from .random_sampler import RandomSampler
@TASK_UTILS.register_module()
class IoUBalancedNegSampler(RandomSampler):
"""IoU Balanced Sampling.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Sampling proposals according to their IoU. `floor_fraction` of needed RoIs
are sampled from proposals whose IoU are lower than `floor_thr` randomly.
The others are sampled from proposals whose IoU are higher than
`floor_thr`. These proposals are sampled from some bins evenly, which are
split by `num_bins` via IoU evenly.
Args:
num (int): number of proposals.
pos_fraction (float): fraction of positive proposals.
        floor_thr (float): threshold (minimum) IoU for IoU balanced sampling;
            set to -1 to apply IoU balanced sampling to all negatives.
floor_fraction (float): sampling fraction of proposals under floor_thr.
num_bins (int): number of bins in IoU balanced sampling.
"""
def __init__(self,
num,
pos_fraction,
floor_thr=-1,
floor_fraction=0,
num_bins=3,
**kwargs):
super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
**kwargs)
assert floor_thr >= 0 or floor_thr == -1
assert 0 <= floor_fraction <= 1
assert num_bins >= 1
self.floor_thr = floor_thr
self.floor_fraction = floor_fraction
self.num_bins = num_bins
def sample_via_interval(self, max_overlaps, full_set, num_expected):
"""Sample according to the iou interval.
Args:
max_overlaps (torch.Tensor): IoU between bounding boxes and ground
truth boxes.
            full_set (set(int)): A full set of indices of boxes.
            num_expected (int): Number of expected samples.
Returns:
np.ndarray: Indices of samples
"""
max_iou = max_overlaps.max()
iou_interval = (max_iou - self.floor_thr) / self.num_bins
per_num_expected = int(num_expected / self.num_bins)
sampled_inds = []
for i in range(self.num_bins):
start_iou = self.floor_thr + i * iou_interval
end_iou = self.floor_thr + (i + 1) * iou_interval
tmp_set = set(
np.where(
np.logical_and(max_overlaps >= start_iou,
max_overlaps < end_iou))[0])
tmp_inds = list(tmp_set & full_set)
if len(tmp_inds) > per_num_expected:
tmp_sampled_set = self.random_choice(tmp_inds,
per_num_expected)
else:
tmp_sampled_set = np.array(tmp_inds, dtype=np.int64)
sampled_inds.append(tmp_sampled_set)
sampled_inds = np.concatenate(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(full_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate([sampled_inds, extra_inds])
return sampled_inds
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Sample negative boxes.
Args:
assign_result (:obj:`AssignResult`): The assigned results of boxes.
num_expected (int): The number of expected negative samples
Returns:
Tensor or ndarray: sampled indices.
"""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
max_overlaps = assign_result.max_overlaps.cpu().numpy()
# balance sampling for negative samples
neg_set = set(neg_inds.cpu().numpy())
if self.floor_thr > 0:
floor_set = set(
np.where(
np.logical_and(max_overlaps >= 0,
max_overlaps < self.floor_thr))[0])
iou_sampling_set = set(
np.where(max_overlaps >= self.floor_thr)[0])
elif self.floor_thr == 0:
floor_set = set(np.where(max_overlaps == 0)[0])
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
else:
floor_set = set()
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
# for sampling interval calculation
self.floor_thr = 0
floor_neg_inds = list(floor_set & neg_set)
iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
num_expected_iou_sampling = int(num_expected *
(1 - self.floor_fraction))
if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
if self.num_bins >= 2:
iou_sampled_inds = self.sample_via_interval(
max_overlaps, set(iou_sampling_neg_inds),
num_expected_iou_sampling)
else:
iou_sampled_inds = self.random_choice(
iou_sampling_neg_inds, num_expected_iou_sampling)
else:
iou_sampled_inds = np.array(
iou_sampling_neg_inds, dtype=np.int64)
num_expected_floor = num_expected - len(iou_sampled_inds)
if len(floor_neg_inds) > num_expected_floor:
sampled_floor_inds = self.random_choice(
floor_neg_inds, num_expected_floor)
else:
sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int64)
sampled_inds = np.concatenate(
(sampled_floor_inds, iou_sampled_inds))
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(neg_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate((sampled_inds, extra_inds))
sampled_inds = torch.from_numpy(sampled_inds).long().to(
assign_result.gt_inds.device)
return sampled_inds
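# Illustrative sketch: how the IoU range above `floor_thr` is split into
# equal-width bins in `sample_via_interval`. The numbers are arbitrary and
# only illustrate the interval arithmetic, not a real sampling run.
if __name__ == '__main__':
    _floor_thr, _num_bins = 0.0, 3
    _max_overlaps = np.array([0.00, 0.05, 0.12, 0.25, 0.31, 0.44, 0.58])
    _iou_interval = (_max_overlaps.max() - _floor_thr) / _num_bins
    for _i in range(_num_bins):
        _start = _floor_thr + _i * _iou_interval
        _end = _floor_thr + (_i + 1) * _iou_interval
        _in_bin = np.where(
            np.logical_and(_max_overlaps >= _start,
                           _max_overlaps < _end))[0]
        print(f'bin {_i}: [{_start:.3f}, {_end:.3f}) -> indices {_in_bin}')
    # Roughly an equal share of the required negatives is then drawn from
    # each bin, so hard negatives (high IoU) are not drowned out by easy ones.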
| 6,745 | 41.427673 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/samplers/mask_pseudo_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""copy from
https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py."""
import torch
from mmengine.structures import InstanceData
from mmdet.registry import TASK_UTILS
from ..assigners import AssignResult
from .base_sampler import BaseSampler
from .mask_sampling_result import MaskSamplingResult
@TASK_UTILS.register_module()
class MaskPseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData, *args, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Mask assigning results.
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``scores`` and ``masks`` predicted
by the model.
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``labels`` and ``masks``
attributes.
Returns:
:obj:`SamplingResult`: sampler results
"""
pred_masks = pred_instances.masks
gt_masks = gt_instances.masks
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = pred_masks.new_zeros(pred_masks.shape[0], dtype=torch.uint8)
sampling_result = MaskSamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
masks=pred_masks,
gt_masks=gt_masks,
assign_result=assign_result,
gt_flags=gt_flags,
avg_factor_with_neg=False)
return sampling_result
| 2,198 | 35.04918 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/samplers/score_hlr_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from mmcv.ops import nms_match
from mmengine.structures import InstanceData
from numpy import ndarray
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox2roi
from ..assigners import AssignResult
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@TASK_UTILS.register_module()
class ScoreHLRSampler(BaseSampler):
r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample
Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.
    Score hierarchical local rank (HLR) differs from RandomSampler in the
    negative part. It first computes Score-HLR in a two-step way,
    then linearly maps the Score-HLR to the loss weights.
Args:
num (int): Total number of sampled RoIs.
pos_fraction (float): Fraction of positive samples.
context (:obj:`BaseRoIHead`): RoI head that the sampler belongs to.
neg_pos_ub (int): Upper bound of the ratio of num negative to num
positive, -1 means no upper bound. Defaults to -1.
add_gt_as_proposals (bool): Whether to add ground truth as proposals.
Defaults to True.
k (float): Power of the non-linear mapping. Defaults to 0.5
bias (float): Shift of the non-linear mapping. Defaults to 0.
score_thr (float): Minimum score that a negative sample is to be
considered as valid bbox. Defaults to 0.05.
iou_thr (float): IoU threshold for NMS match. Defaults to 0.5.
"""
def __init__(self,
num: int,
pos_fraction: float,
context,
neg_pos_ub: int = -1,
add_gt_as_proposals: bool = True,
k: float = 0.5,
bias: float = 0,
score_thr: float = 0.05,
iou_thr: float = 0.5,
**kwargs) -> None:
super().__init__(
num=num,
pos_fraction=pos_fraction,
neg_pos_ub=neg_pos_ub,
add_gt_as_proposals=add_gt_as_proposals)
self.k = k
self.bias = bias
self.score_thr = score_thr
self.iou_thr = iou_thr
self.context = context
# context of cascade detectors is a list, so distinguish them here.
if not hasattr(context, 'num_stages'):
self.bbox_roi_extractor = context.bbox_roi_extractor
self.bbox_head = context.bbox_head
self.with_shared_head = context.with_shared_head
if self.with_shared_head:
self.shared_head = context.shared_head
else:
self.bbox_roi_extractor = context.bbox_roi_extractor[
context.current_stage]
self.bbox_head = context.bbox_head[context.current_stage]
@staticmethod
def random_choice(gallery: Union[Tensor, ndarray, list],
num: int) -> Union[Tensor, ndarray]:
"""Randomly select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor or ndarray or list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result: AssignResult, num_expected: int,
**kwargs) -> Union[Tensor, ndarray]:
"""Randomly sample some positive samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result: AssignResult, num_expected: int,
bboxes: Tensor, feats: Tensor,
**kwargs) -> Union[Tensor, ndarray]:
"""Sample negative samples.
        Score-HLR sampling is done in the following steps:
        1. Take the maximum positive score prediction of each negative
            sample as s_i.
        2. Filter out negative samples whose s_i <= score_thr; the remaining
            samples are called valid samples.
        3. Use NMS-Match to divide valid samples into different groups;
            samples in the same group greatly overlap with each other.
4. Rank the matched samples in two-steps to get Score-HLR.
(1) In the same group, rank samples with their scores.
(2) In the same score rank across different groups,
rank samples with their scores again.
5. Linearly map Score-HLR to the final label weights.
Args:
assign_result (:obj:`AssignResult`): result of assigner.
num_expected (int): Expected number of samples.
bboxes (Tensor): bbox to be sampled.
feats (Tensor): Features come from FPN.
Returns:
Tensor or ndarray: sampled indices.
"""
neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()
num_neg = neg_inds.size(0)
if num_neg == 0:
return neg_inds, None
with torch.no_grad():
neg_bboxes = bboxes[neg_inds]
neg_rois = bbox2roi([neg_bboxes])
bbox_result = self.context._bbox_forward(feats, neg_rois)
cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[
'bbox_pred']
ori_loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=None,
labels=neg_inds.new_full((num_neg, ),
self.bbox_head.num_classes),
label_weights=cls_score.new_ones(num_neg),
bbox_targets=None,
bbox_weights=None,
reduction_override='none')['loss_cls']
# filter out samples with the max score lower than score_thr
max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)
valid_inds = (max_score > self.score_thr).nonzero().view(-1)
invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)
num_valid = valid_inds.size(0)
num_invalid = invalid_inds.size(0)
num_expected = min(num_neg, num_expected)
num_hlr = min(num_valid, num_expected)
num_rand = num_expected - num_hlr
if num_valid > 0:
valid_rois = neg_rois[valid_inds]
valid_max_score = max_score[valid_inds]
valid_argmax_score = argmax_score[valid_inds]
valid_bbox_pred = bbox_pred[valid_inds]
# valid_bbox_pred shape: [num_valid, #num_classes, 4]
valid_bbox_pred = valid_bbox_pred.view(
valid_bbox_pred.size(0), -1, 4)
selected_bbox_pred = valid_bbox_pred[range(num_valid),
valid_argmax_score]
pred_bboxes = self.bbox_head.bbox_coder.decode(
valid_rois[:, 1:], selected_bbox_pred)
pred_bboxes_with_score = torch.cat(
[pred_bboxes, valid_max_score[:, None]], -1)
group = nms_match(pred_bboxes_with_score, self.iou_thr)
# imp: importance
imp = cls_score.new_zeros(num_valid)
for g in group:
g_score = valid_max_score[g]
                # g_score has already been sorted
rank = g_score.new_tensor(range(g_score.size(0)))
imp[g] = num_valid - rank + g_score
_, imp_rank_inds = imp.sort(descending=True)
_, imp_rank = imp_rank_inds.sort()
hlr_inds = imp_rank_inds[:num_expected]
if num_rand > 0:
rand_inds = torch.randperm(num_invalid)[:num_rand]
select_inds = torch.cat(
[valid_inds[hlr_inds], invalid_inds[rand_inds]])
else:
select_inds = valid_inds[hlr_inds]
neg_label_weights = cls_score.new_ones(num_expected)
up_bound = max(num_expected, num_valid)
imp_weights = (up_bound -
imp_rank[hlr_inds].float()) / up_bound
neg_label_weights[:num_hlr] = imp_weights
neg_label_weights[num_hlr:] = imp_weights.min()
neg_label_weights = (self.bias +
(1 - self.bias) * neg_label_weights).pow(
self.k)
ori_selected_loss = ori_loss[select_inds]
new_loss = ori_selected_loss * neg_label_weights
norm_ratio = ori_selected_loss.sum() / new_loss.sum()
neg_label_weights *= norm_ratio
else:
neg_label_weights = cls_score.new_ones(num_expected)
select_inds = torch.randperm(num_neg)[:num_expected]
return neg_inds[select_inds], neg_label_weights
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData, **kwargs) -> SamplingResult:
"""Sample positive and negative bboxes.
This is a simple implementation of bbox sampling given candidates,
assigning results and ground truth bboxes.
Args:
assign_result (:obj:`AssignResult`): Assigning results.
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors or points, or the bboxes predicted by the
previous stage, has shape (n, 4). The bboxes predicted by
the current model or stage will be named ``bboxes``,
``labels``, and ``scores``, the same as the ``InstanceData``
in other places.
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes``, with shape (k, 4),
and ``labels``, with shape (k, ).
Returns:
:obj:`SamplingResult`: Sampling result.
"""
gt_bboxes = gt_instances.bboxes
priors = pred_instances.priors
gt_labels = gt_instances.labels
gt_flags = priors.new_zeros((priors.shape[0], ), dtype=torch.uint8)
if self.add_gt_as_proposals and len(gt_bboxes) > 0:
priors = torch.cat([gt_bboxes, priors], dim=0)
assign_result.add_gt_(gt_labels)
gt_ones = priors.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
gt_flags = torch.cat([gt_ones, gt_flags])
num_expected_pos = int(self.num * self.pos_fraction)
pos_inds = self.pos_sampler._sample_pos(
assign_result, num_expected_pos, bboxes=priors, **kwargs)
num_sampled_pos = pos_inds.numel()
num_expected_neg = self.num - num_sampled_pos
if self.neg_pos_ub >= 0:
_pos = max(1, num_sampled_pos)
neg_upper_bound = int(self.neg_pos_ub * _pos)
if num_expected_neg > neg_upper_bound:
num_expected_neg = neg_upper_bound
neg_inds, neg_label_weights = self.neg_sampler._sample_neg(
assign_result, num_expected_neg, bboxes=priors, **kwargs)
sampling_result = SamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
priors=priors,
gt_bboxes=gt_bboxes,
assign_result=assign_result,
gt_flags=gt_flags)
return sampling_result, neg_label_weights
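# Illustrative sketch: the final label-weight mapping used above, isolated
# on made-up ranks. Weights decay linearly with the Score-HLR rank and are
# then shifted by `bias` and sharpened by `k`; all values below are
# arbitrary.
if __name__ == '__main__':
    _k, _bias = 0.5, 0.0
    _num_expected, _num_valid = 4, 6
    _imp_rank = torch.tensor([0., 2., 3., 5.])  # hypothetical HLR ranks
    _up_bound = max(_num_expected, _num_valid)
    _weights = (_up_bound - _imp_rank) / _up_bound
    _weights = (_bias + (1 - _bias) * _weights).pow(_k)
    print(_weights)  # better-ranked (harder) negatives get larger weights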
| 12,663 | 42.5189 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/samplers/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .mask_pseudo_sampler import MaskPseudoSampler
from .mask_sampling_result import MaskSamplingResult
from .multi_instance_random_sampler import MultiInsRandomSampler
from .multi_instance_sampling_result import MultiInstanceSamplingResult
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from .score_hlr_sampler import ScoreHLRSampler
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler',
'MaskSamplingResult', 'MultiInstanceSamplingResult',
'MultiInsRandomSampler'
]
| 1,024 | 43.565217 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/samplers/sampling_result.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import torch
from torch import Tensor
from mmdet.structures.bbox import BaseBoxes, cat_boxes
from mmdet.utils import util_mixins
from mmdet.utils.util_random import ensure_rng
from ..assigners import AssignResult
def random_boxes(num=1, scale=1, rng=None):
"""Simple version of ``kwimage.Boxes.random``
Returns:
Tensor: shape (n, 4) in x1, y1, x2, y2 format.
References:
https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
Example:
>>> num = 3
>>> scale = 512
>>> rng = 0
>>> boxes = random_boxes(num, scale, rng)
>>> print(boxes)
tensor([[280.9925, 278.9802, 308.6148, 366.1769],
[216.9113, 330.6978, 224.0446, 456.5878],
[405.3632, 196.3221, 493.3953, 270.7942]])
"""
rng = ensure_rng(rng)
tlbr = rng.rand(num, 4).astype(np.float32)
tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
tlbr[:, 0] = tl_x * scale
tlbr[:, 1] = tl_y * scale
tlbr[:, 2] = br_x * scale
tlbr[:, 3] = br_y * scale
boxes = torch.from_numpy(tlbr)
return boxes
class SamplingResult(util_mixins.NiceRepr):
"""Bbox sampling result.
Args:
pos_inds (Tensor): Indices of positive samples.
neg_inds (Tensor): Indices of negative samples.
priors (Tensor): The priors can be anchors or points,
or the bboxes predicted by the previous stage.
gt_bboxes (Tensor): Ground truth of bboxes.
assign_result (:obj:`AssignResult`): Assigning results.
gt_flags (Tensor): The Ground truth flags.
avg_factor_with_neg (bool): If True, ``avg_factor`` equal to
the number of total priors; Otherwise, it is the number of
positive priors. Defaults to True.
Example:
>>> # xdoctest: +IGNORE_WANT
>>> from mmdet.models.task_modules.samplers.sampling_result import * # NOQA
>>> self = SamplingResult.random(rng=10)
>>> print(f'self = {self}')
self = <SamplingResult({
'neg_inds': tensor([1, 2, 3, 5, 6, 7, 8,
9, 10, 11, 12, 13]),
'neg_priors': torch.Size([12, 4]),
'num_gts': 1,
'num_neg': 12,
'num_pos': 1,
'avg_factor': 13,
'pos_assigned_gt_inds': tensor([0]),
'pos_inds': tensor([0]),
'pos_is_gt': tensor([1], dtype=torch.uint8),
'pos_priors': torch.Size([1, 4])
})>
"""
def __init__(self,
pos_inds: Tensor,
neg_inds: Tensor,
priors: Tensor,
gt_bboxes: Tensor,
assign_result: AssignResult,
gt_flags: Tensor,
avg_factor_with_neg: bool = True) -> None:
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.num_pos = max(pos_inds.numel(), 1)
self.num_neg = max(neg_inds.numel(), 1)
self.avg_factor_with_neg = avg_factor_with_neg
self.avg_factor = self.num_pos + self.num_neg \
if avg_factor_with_neg else self.num_pos
self.pos_priors = priors[pos_inds]
self.neg_priors = priors[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_bboxes.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
self.pos_gt_labels = assign_result.labels[pos_inds]
box_dim = gt_bboxes.box_dim if isinstance(gt_bboxes, BaseBoxes) else 4
if gt_bboxes.numel() == 0:
# hack for index error case
assert self.pos_assigned_gt_inds.numel() == 0
self.pos_gt_bboxes = gt_bboxes.view(-1, box_dim)
else:
if len(gt_bboxes.shape) < 2:
gt_bboxes = gt_bboxes.view(-1, box_dim)
self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long()]
@property
def priors(self):
"""torch.Tensor: concatenated positive and negative priors"""
return cat_boxes([self.pos_priors, self.neg_priors])
@property
def bboxes(self):
"""torch.Tensor: concatenated positive and negative boxes"""
warnings.warn('DeprecationWarning: bboxes is deprecated, '
'please use "priors" instead')
return self.priors
@property
def pos_bboxes(self):
warnings.warn('DeprecationWarning: pos_bboxes is deprecated, '
'please use "pos_priors" instead')
return self.pos_priors
@property
def neg_bboxes(self):
warnings.warn('DeprecationWarning: neg_bboxes is deprecated, '
'please use "neg_priors" instead')
return self.neg_priors
def to(self, device):
"""Change the device of the data inplace.
Example:
>>> self = SamplingResult.random()
>>> print(f'self = {self.to(None)}')
>>> # xdoctest: +REQUIRES(--gpu)
>>> print(f'self = {self.to(0)}')
"""
_dict = self.__dict__
for key, value in _dict.items():
if isinstance(value, (torch.Tensor, BaseBoxes)):
_dict[key] = value.to(device)
return self
def __nice__(self):
data = self.info.copy()
data['pos_priors'] = data.pop('pos_priors').shape
data['neg_priors'] = data.pop('neg_priors').shape
parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
body = ' ' + ',\n '.join(parts)
return '{\n' + body + '\n}'
@property
def info(self):
"""Returns a dictionary of info about the object."""
return {
'pos_inds': self.pos_inds,
'neg_inds': self.neg_inds,
'pos_priors': self.pos_priors,
'neg_priors': self.neg_priors,
'pos_is_gt': self.pos_is_gt,
'num_gts': self.num_gts,
'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
'num_pos': self.num_pos,
'num_neg': self.num_neg,
'avg_factor': self.avg_factor
}
@classmethod
def random(cls, rng=None, **kwargs):
"""
Args:
rng (None | int | numpy.random.RandomState): seed or state.
kwargs (keyword arguments):
- num_preds: Number of predicted boxes.
- num_gts: Number of true boxes.
- p_ignore (float): Probability of a predicted box assigned to
an ignored truth.
- p_assigned (float): probability of a predicted box not being
assigned.
Returns:
:obj:`SamplingResult`: Randomly generated sampling result.
Example:
>>> from mmdet.models.task_modules.samplers.sampling_result import * # NOQA
>>> self = SamplingResult.random()
>>> print(self.__dict__)
"""
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import AssignResult
from mmdet.models.task_modules.samplers import RandomSampler
rng = ensure_rng(rng)
# make probabilistic?
num = 32
pos_fraction = 0.5
neg_pos_ub = -1
assign_result = AssignResult.random(rng=rng, **kwargs)
# Note we could just compute an assignment
priors = random_boxes(assign_result.num_preds, rng=rng)
gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
gt_labels = torch.randint(
0, 5, (assign_result.num_gts, ), dtype=torch.long)
pred_instances = InstanceData()
pred_instances.priors = priors
gt_instances = InstanceData()
gt_instances.bboxes = gt_bboxes
gt_instances.labels = gt_labels
add_gt_as_proposals = True
sampler = RandomSampler(
num,
pos_fraction,
neg_pos_ub=neg_pos_ub,
add_gt_as_proposals=add_gt_as_proposals,
rng=rng)
self = sampler.sample(
assign_result=assign_result,
pred_instances=pred_instances,
gt_instances=gt_instances)
return self
| 8,409 | 33.896266 | 101 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/samplers/pseudo_sampler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.structures import InstanceData
from mmdet.registry import TASK_UTILS
from ..assigners import AssignResult
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@TASK_UTILS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result: AssignResult, pred_instances: InstanceData,
gt_instances: InstanceData, *args, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
pred_instances (:obj:`InstanceData`): Instances of model
predictions. It includes ``priors``, and the priors can
be anchors, points, or bboxes predicted by the model,
shape(n, 4).
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes`` and ``labels``
attributes.
Returns:
:obj:`SamplingResult`: sampler results
"""
gt_bboxes = gt_instances.bboxes
priors = pred_instances.priors
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(
pos_inds=pos_inds,
neg_inds=neg_inds,
priors=priors,
gt_bboxes=gt_bboxes,
assign_result=assign_result,
gt_flags=gt_flags,
avg_factor_with_neg=False)
return sampling_result
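# Illustrative usage sketch: PseudoSampler keeps every assigned candidate,
# so it is the usual choice for dense or query-based heads where no
# subsampling is wanted. A hedged, config-style selection; the assigner
# fields are illustrative only.
#
# train_cfg = dict(
#     assigner=dict(
#         type='MaxIoUAssigner',
#         pos_iou_thr=0.5,
#         neg_iou_thr=0.4,
#         min_pos_iou=0,
#         ignore_iof_thr=-1),
#     sampler=dict(type='PseudoSampler'),  # all pos/neg indices are kept
#     allowed_border=-1,
#     pos_weight=-1,
#     debug=False)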
| 2,145 | 34.180328 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/coders/yolo_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides
    the image into grids and encodes bbox (x1, y1, x2, y2) into (cx, cy, dw, dh).
    cx, cy in [0., 1.] denote the relative center position w.r.t. the center of
    bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps: float = 1e-6, **kwargs):
super().__init__(**kwargs)
self.eps = eps
def encode(self, bboxes: Union[Tensor, BaseBoxes],
gt_bboxes: Union[Tensor, BaseBoxes],
stride: Union[Tensor, int]) -> Tensor:
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,
e.g., anchors.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the
transformation, e.g., ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
def decode(self, bboxes: Union[Tensor, BaseBoxes], pred_bboxes: Tensor,
stride: Union[Tensor, int]) -> Union[Tensor, BaseBoxes]:
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
            bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes,
                e.g. anchors.
            pred_bboxes (torch.Tensor): Encoded boxes with 4 values
                (cx, cy, dw, dh) in the last dimension.
stride (torch.Tensor | int): Strides of bboxes.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
if self.use_box_type:
decoded_bboxes = HorizontalBoxes(decoded_bboxes)
return decoded_bboxes
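# Illustrative sketch: a hedged encode/decode round trip on one hand-made
# anchor/gt pair (the stride and coordinates below are arbitrary). Decoding
# the encoded deltas should reproduce the gt box up to floating point error.
if __name__ == '__main__':
    _coder = YOLOBBoxCoder()
    _anchors = torch.tensor([[0., 0., 32., 32.]])
    _gts = torch.tensor([[2., 2., 30., 38.]])
    _stride = 32
    _deltas = _coder.encode(_anchors, _gts, _stride)
    _decoded = _coder.decode(_anchors, _deltas, _stride)
    print(_deltas)   # (cx, cy, dw, dh); cx, cy are grid-relative in [0, 1]
    print(_decoded)  # ~tensor([[ 2.,  2., 30., 38.]])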
| 3,864 | 39.684211 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/coders/distance_point_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import (BaseBoxes, HorizontalBoxes, bbox2distance,
distance2bbox, get_box_tensor)
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
    This coder encodes gt bboxes (x1, y1, x2, y2) into (left, top, right,
    bottom) distances from a point and decodes them back to the original.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border: Optional[bool] = True, **kwargs) -> None:
super().__init__(**kwargs)
self.clip_border = clip_border
def encode(self,
points: Tensor,
gt_bboxes: Union[Tensor, BaseBoxes],
max_dis: Optional[float] = None,
eps: float = 0.1) -> Tensor:
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format
is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
eps (float): a small value to ensure target < max_dis, instead <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
gt_bboxes = get_box_tensor(gt_bboxes)
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(
self,
points: Tensor,
pred_bboxes: Tensor,
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None
) -> Union[Tensor, BaseBoxes]:
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Boxes with shape (N, 4) or
(B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
bboxes = distance2bbox(points, pred_bboxes, max_shape)
if self.use_box_type:
bboxes = HorizontalBoxes(bboxes)
return bboxes
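# Illustrative sketch: a hedged decode example. Each prediction holds the
# (left, top, right, bottom) distances from its point, so decoding is just
# point -/+ distance; all numbers below are arbitrary.
if __name__ == '__main__':
    import torch
    _coder = DistancePointBBoxCoder()
    _points = torch.tensor([[10., 10.], [50., 40.]])
    _dists = torch.tensor([[4., 3., 6., 7.], [10., 10., 10., 10.]])
    _boxes = _coder.decode(_points, _dists, max_shape=(100, 100))
    print(_boxes)  # -> [[ 6.,  7., 16., 17.], [40., 30., 60., 50.]]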
| 3,205 | 36.27907 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/coders/bucketing_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import (BaseBoxes, HorizontalBoxes, bbox_rescale,
get_box_tensor)
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class BucketingBBoxCoder(BaseBBoxCoder):
"""Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).
Boundary Localization with Bucketing and Bucketing Guided Rescoring
are implemented here.
Please refer to https://arxiv.org/abs/1912.04260 for more details.
Args:
num_buckets (int): Number of buckets.
scale_factor (int): Scale factor of proposals to generate buckets.
offset_topk (int): Topk buckets are used to generate
bucket fine regression targets. Defaults to 2.
offset_upperbound (float): Offset upperbound to generate
bucket fine regression targets.
To avoid too large offset displacements. Defaults to 1.0.
cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
Defaults to True.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self,
num_buckets: int,
scale_factor: int,
offset_topk: int = 2,
offset_upperbound: float = 1.0,
cls_ignore_neighbor: bool = True,
clip_border: bool = True,
**kwargs) -> None:
super().__init__(**kwargs)
self.num_buckets = num_buckets
self.scale_factor = scale_factor
self.offset_topk = offset_topk
self.offset_upperbound = offset_upperbound
self.cls_ignore_neighbor = cls_ignore_neighbor
self.clip_border = clip_border
def encode(self, bboxes: Union[Tensor, BaseBoxes],
gt_bboxes: Union[Tensor, BaseBoxes]) -> Tuple[Tensor]:
"""Get bucketing estimation and fine regression targets during
training.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): source boxes,
e.g., object proposals.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): target of the
transformation, e.g., ground truth boxes.
Returns:
encoded_bboxes(tuple[Tensor]): bucketing estimation
and fine regression targets and weights
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,
self.scale_factor, self.offset_topk,
self.offset_upperbound,
self.cls_ignore_neighbor)
return encoded_bboxes
def decode(
self,
bboxes: Union[Tensor, BaseBoxes],
pred_bboxes: Tensor,
max_shape: Optional[Tuple[int]] = None
) -> Tuple[Union[Tensor, BaseBoxes], Tensor]:
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
            bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes.
pred_bboxes (torch.Tensor): Predictions for bucketing estimation
and fine regression
max_shape (tuple[int], optional): Maximum shape of boxes.
Defaults to None.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
assert len(pred_bboxes) == 2
cls_preds, offset_preds = pred_bboxes
assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(
0) == bboxes.size(0)
bboxes, loc_confidence = bucket2bbox(bboxes, cls_preds, offset_preds,
self.num_buckets,
self.scale_factor, max_shape,
self.clip_border)
if self.use_box_type:
bboxes = HorizontalBoxes(bboxes, clone=False)
return bboxes, loc_confidence
def generat_buckets(proposals: Tensor,
num_buckets: int,
scale_factor: float = 1.0) -> Tuple[Tensor]:
"""Generate buckets w.r.t bucket number and scale factor of proposals.
Args:
proposals (Tensor): Shape (n, 4)
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
Returns:
tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,
t_buckets, d_buckets)
- bucket_w: Width of buckets on x-axis. Shape (n, ).
- bucket_h: Height of buckets on y-axis. Shape (n, ).
- l_buckets: Left buckets. Shape (n, ceil(side_num/2)).
- r_buckets: Right buckets. Shape (n, ceil(side_num/2)).
- t_buckets: Top buckets. Shape (n, ceil(side_num/2)).
- d_buckets: Down buckets. Shape (n, ceil(side_num/2)).
"""
proposals = bbox_rescale(proposals, scale_factor)
# number of buckets in each side
side_num = int(np.ceil(num_buckets / 2.0))
pw = proposals[..., 2] - proposals[..., 0]
ph = proposals[..., 3] - proposals[..., 1]
px1 = proposals[..., 0]
py1 = proposals[..., 1]
px2 = proposals[..., 2]
py2 = proposals[..., 3]
bucket_w = pw / num_buckets
bucket_h = ph / num_buckets
# left buckets
l_buckets = px1[:, None] + (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
# right buckets
r_buckets = px2[:, None] - (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
# top buckets
t_buckets = py1[:, None] + (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
# down buckets
d_buckets = py2[:, None] - (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets
def bbox2bucket(proposals: Tensor,
gt: Tensor,
num_buckets: int,
scale_factor: float,
offset_topk: int = 2,
offset_upperbound: float = 1.0,
cls_ignore_neighbor: bool = True) -> Tuple[Tensor]:
"""Generate buckets estimation and fine regression targets.
Args:
proposals (Tensor): Shape (n, 4)
gt (Tensor): Shape (n, 4)
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
offset_topk (int): Topk buckets are used to generate
bucket fine regression targets. Defaults to 2.
offset_upperbound (float): Offset allowance to generate
bucket fine regression targets.
To avoid too large offset displacements. Defaults to 1.0.
cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
Defaults to True.
Returns:
tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).
- offsets: Fine regression targets. \
Shape (n, num_buckets*2).
- offsets_weights: Fine regression weights. \
Shape (n, num_buckets*2).
- bucket_labels: Bucketing estimation labels. \
Shape (n, num_buckets*2).
- cls_weights: Bucketing estimation weights. \
Shape (n, num_buckets*2).
"""
assert proposals.size() == gt.size()
# generate buckets
proposals = proposals.float()
gt = gt.float()
(bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,
d_buckets) = generat_buckets(proposals, num_buckets, scale_factor)
gx1 = gt[..., 0]
gy1 = gt[..., 1]
gx2 = gt[..., 2]
gy2 = gt[..., 3]
# generate offset targets and weights
# offsets from buckets to gts
l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None]
r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None]
t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None]
d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None]
# select top-k nearest buckets
l_topk, l_label = l_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
r_topk, r_label = r_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
t_topk, t_label = t_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
d_topk, d_label = d_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
offset_l_weights = l_offsets.new_zeros(l_offsets.size())
offset_r_weights = r_offsets.new_zeros(r_offsets.size())
offset_t_weights = t_offsets.new_zeros(t_offsets.size())
offset_d_weights = d_offsets.new_zeros(d_offsets.size())
inds = torch.arange(0, proposals.size(0)).to(proposals).long()
# generate offset weights of top-k nearest buckets
for k in range(offset_topk):
if k >= 1:
offset_l_weights[inds, l_label[:,
k]] = (l_topk[:, k] <
offset_upperbound).float()
offset_r_weights[inds, r_label[:,
k]] = (r_topk[:, k] <
offset_upperbound).float()
offset_t_weights[inds, t_label[:,
k]] = (t_topk[:, k] <
offset_upperbound).float()
offset_d_weights[inds, d_label[:,
k]] = (d_topk[:, k] <
offset_upperbound).float()
else:
offset_l_weights[inds, l_label[:, k]] = 1.0
offset_r_weights[inds, r_label[:, k]] = 1.0
offset_t_weights[inds, t_label[:, k]] = 1.0
offset_d_weights[inds, d_label[:, k]] = 1.0
offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1)
offsets_weights = torch.cat([
offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights
],
dim=-1)
# generate bucket labels and weight
side_num = int(np.ceil(num_buckets / 2.0))
labels = torch.stack(
[l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1)
batch_size = labels.size(0)
bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size,
-1).float()
bucket_cls_l_weights = (l_offsets.abs() < 1).float()
bucket_cls_r_weights = (r_offsets.abs() < 1).float()
bucket_cls_t_weights = (t_offsets.abs() < 1).float()
bucket_cls_d_weights = (d_offsets.abs() < 1).float()
bucket_cls_weights = torch.cat([
bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights,
bucket_cls_d_weights
],
dim=-1)
# ignore second nearest buckets for cls if necessary
if cls_ignore_neighbor:
bucket_cls_weights = (~((bucket_cls_weights == 1) &
(bucket_labels == 0))).float()
else:
bucket_cls_weights[:] = 1.0
return offsets, offsets_weights, bucket_labels, bucket_cls_weights
def bucket2bbox(proposals: Tensor,
cls_preds: Tensor,
offset_preds: Tensor,
num_buckets: int,
scale_factor: float = 1.0,
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None,
clip_border: bool = True) -> Tuple[Tensor]:
"""Apply bucketing estimation (cls preds) and fine regression (offset
preds) to generate det bboxes.
Args:
proposals (Tensor): Boxes to be transformed. Shape (n, 4)
cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2).
offset_preds (Tensor): fine regression. Shape (n, num_buckets*2).
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
        max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W).
        clip_border (bool, optional): Whether to clip objects outside the
border of the image. Defaults to True.
Returns:
tuple[Tensor]: (bboxes, loc_confidence).
- bboxes: predicted bboxes. Shape (n, 4)
- loc_confidence: localization confidence of predicted bboxes.
Shape (n,).
"""
side_num = int(np.ceil(num_buckets / 2.0))
cls_preds = cls_preds.view(-1, side_num)
offset_preds = offset_preds.view(-1, side_num)
scores = F.softmax(cls_preds, dim=1)
score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)
rescaled_proposals = bbox_rescale(proposals, scale_factor)
pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]
ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]
px1 = rescaled_proposals[..., 0]
py1 = rescaled_proposals[..., 1]
px2 = rescaled_proposals[..., 2]
py2 = rescaled_proposals[..., 3]
bucket_w = pw / num_buckets
bucket_h = ph / num_buckets
score_inds_l = score_label[0::4, 0]
score_inds_r = score_label[1::4, 0]
score_inds_t = score_label[2::4, 0]
score_inds_d = score_label[3::4, 0]
l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w
r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w
t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h
d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h
offsets = offset_preds.view(-1, 4, side_num)
inds = torch.arange(proposals.size(0)).to(proposals).long()
l_offsets = offsets[:, 0, :][inds, score_inds_l]
r_offsets = offsets[:, 1, :][inds, score_inds_r]
t_offsets = offsets[:, 2, :][inds, score_inds_t]
d_offsets = offsets[:, 3, :][inds, score_inds_d]
x1 = l_buckets - l_offsets * bucket_w
x2 = r_buckets - r_offsets * bucket_w
y1 = t_buckets - t_offsets * bucket_h
y2 = d_buckets - d_offsets * bucket_h
if clip_border and max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]],
dim=-1)
# bucketing guided rescoring
loc_confidence = score_topk[:, 0]
top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1
loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()
loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)
return bboxes, loc_confidence
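# --- Illustrative usage sketch (added for clarity, not part of the original
# module). Random bucketing scores and offsets are decoded into boxes and a
# per-box localization confidence; all numbers are placeholders.
def _demo_bucket2bbox():
    num_buckets = 14  # predictions then have num_buckets * 2 = 28 columns
    proposals = torch.tensor([[0., 0., 100., 100.], [20., 20., 80., 120.]])
    cls_preds = torch.randn(2, num_buckets * 2)
    offset_preds = torch.randn(2, num_buckets * 2)
    bboxes, loc_confidence = bucket2bbox(
        proposals,
        cls_preds,
        offset_preds,
        num_buckets,
        scale_factor=1.7,
        max_shape=(160, 160))
    assert bboxes.shape == (2, 4)
    assert loc_confidence.shape == (2, )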
| 15,197 | 40.411444 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/coders/pseudo_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
"""Pseudo bounding box coder."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def encode(self, bboxes: Tensor, gt_bboxes: Union[Tensor,
BaseBoxes]) -> Tensor:
"""torch.Tensor: return the given ``bboxes``"""
gt_bboxes = get_box_tensor(gt_bboxes)
return gt_bboxes
def decode(self, bboxes: Tensor, pred_bboxes: Union[Tensor,
BaseBoxes]) -> Tensor:
"""torch.Tensor: return the given ``pred_bboxes``"""
if self.use_box_type:
pred_bboxes = HorizontalBoxes(pred_bboxes)
return pred_bboxes
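# --- Illustrative usage sketch (added for clarity, not part of the original
# module). PseudoBBoxCoder is an identity coder: ``encode`` returns the
# ground-truth boxes untouched and ``decode`` returns the predictions
# untouched.
def _demo_pseudo_bbox_coder():
    import torch
    coder = PseudoBBoxCoder()
    priors = torch.tensor([[0., 0., 10., 10.]])
    gts = torch.tensor([[1., 2., 9., 8.]])
    assert torch.equal(coder.encode(priors, gts), gts)
    assert torch.equal(coder.decode(priors, gts), gts)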
| 1,019 | 33 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/coders/base_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder.
Args:
        use_box_type (bool): Whether to wrap decoded boxes with the
box type data structure. Defaults to False.
"""
    # The size of the last dimension of the encoded tensor.
encode_size = 4
def __init__(self, use_box_type: bool = False, **kwargs):
self.use_box_type = use_box_type
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
| 776 | 27.777778 | 71 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/coders/tblr_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class TBLRBBoxCoder(BaseBBoxCoder):
"""TBLR BBox coder.
Following the practice in `FSAF <https://arxiv.org/abs/1903.00621>`_,
this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
    right) and decodes them back to the original.
Args:
        normalizer (list | float): Normalization factor that the coordinates
            are divided by during encoding. If it is a list, it should have a
            length of 4, indicating the normalization factors in tblr dims.
Otherwise it is a unified float factor for all dims. Default: 4.0
        clip_border (bool, optional): Whether to clip objects outside the
border of the image. Defaults to True.
"""
def __init__(self,
normalizer: Union[Sequence[float], float] = 4.0,
clip_border: bool = True,
**kwargs) -> None:
super().__init__(**kwargs)
self.normalizer = normalizer
self.clip_border = clip_border
def encode(self, bboxes: Union[Tensor, BaseBoxes],
gt_bboxes: Union[Tensor, BaseBoxes]) -> Tensor:
"""Get box regression transformation deltas that can be used to
        transform the ``bboxes`` into the ``gt_bboxes`` in the (top, bottom,
        left, right) order.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): source boxes,
e.g., object proposals.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): target of the
transformation, e.g., ground truth boxes.
Returns:
torch.Tensor: Box transformation deltas
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = bboxes2tblr(
bboxes, gt_bboxes, normalizer=self.normalizer)
return encoded_bboxes
def decode(
self,
bboxes: Union[Tensor, BaseBoxes],
pred_bboxes: Tensor,
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None
) -> Union[Tensor, BaseBoxes]:
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes.Shape
(B, N, 4) or (N, 4)
pred_bboxes (torch.Tensor): Encoded boxes with shape
(B, N, 4) or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
decoded_bboxes = tblr2bboxes(
bboxes,
pred_bboxes,
normalizer=self.normalizer,
max_shape=max_shape,
clip_border=self.clip_border)
if self.use_box_type:
decoded_bboxes = HorizontalBoxes(decoded_bboxes)
return decoded_bboxes
def bboxes2tblr(priors: Tensor,
gts: Tensor,
normalizer: Union[Sequence[float], float] = 4.0,
normalize_by_wh: bool = True) -> Tensor:
"""Encode ground truth boxes to tblr coordinate.
It first convert the gt coordinate to tblr format,
(top, bottom, left, right), relative to prior box centers.
The tblr coordinate may be normalized by the side length of prior bboxes
if `normalize_by_wh` is specified as True, and it is then normalized by
the `normalizer` factor.
Args:
priors (Tensor): Prior boxes in point form
Shape: (num_proposals,4).
gts (Tensor): Coords of ground truth for each prior in point-form
Shape: (num_proposals, 4).
normalizer (Sequence[float] | float): normalization parameter of
encoded boxes. If it is a list, it has to have length = 4.
Default: 4.0
normalize_by_wh (bool): Whether to normalize tblr coordinate by the
side length (wh) of prior bboxes.
Return:
encoded boxes (Tensor), Shape: (num_proposals, 4)
"""
# dist b/t match center and prior's center
if not isinstance(normalizer, float):
normalizer = torch.tensor(normalizer, device=priors.device)
assert len(normalizer) == 4, 'Normalizer must have length = 4'
assert priors.size(0) == gts.size(0)
prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2
xmin, ymin, xmax, ymax = gts.split(1, dim=1)
top = prior_centers[:, 1].unsqueeze(1) - ymin
bottom = ymax - prior_centers[:, 1].unsqueeze(1)
left = prior_centers[:, 0].unsqueeze(1) - xmin
right = xmax - prior_centers[:, 0].unsqueeze(1)
loc = torch.cat((top, bottom, left, right), dim=1)
if normalize_by_wh:
# Normalize tblr by anchor width and height
wh = priors[:, 2:4] - priors[:, 0:2]
w, h = torch.split(wh, 1, dim=1)
loc[:, :2] /= h # tb is normalized by h
loc[:, 2:] /= w # lr is normalized by w
# Normalize tblr by the given normalization factor
return loc / normalizer
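# --- Illustrative worked example (added for clarity, not part of the
# original module). For the prior (0, 0, 10, 10) the center is (5, 5) and
# w = h = 10, so the gt (2, 3, 8, 9) gives top = 2, bottom = 4, left = 3,
# right = 3; dividing tb by h, lr by w and then by the default normalizer
# 4.0 yields (0.05, 0.1, 0.075, 0.075).
def _demo_bboxes2tblr():
    priors = torch.tensor([[0., 0., 10., 10.]])
    gts = torch.tensor([[2., 3., 8., 9.]])
    loc = bboxes2tblr(priors, gts)
    assert torch.allclose(loc, torch.tensor([[0.05, 0.1, 0.075, 0.075]]))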
def tblr2bboxes(priors: Tensor,
tblr: Tensor,
normalizer: Union[Sequence[float], float] = 4.0,
normalize_by_wh: bool = True,
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None,
clip_border: bool = True) -> Tensor:
"""Decode tblr outputs to prediction boxes.
The process includes 3 steps: 1) De-normalize tblr coordinates by
multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the
prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert
tblr (top, bottom, left, right) pair relative to the center of priors back
to (xmin, ymin, xmax, ymax) coordinate.
Args:
priors (Tensor): Prior boxes in point form (x0, y0, x1, y1)
Shape: (N,4) or (B, N, 4).
tblr (Tensor): Coords of network output in tblr form
Shape: (N, 4) or (B, N, 4).
normalizer (Sequence[float] | float): Normalization parameter of
encoded boxes. By list, it represents the normalization factors at
tblr dims. By float, it is the unified normalization factor at all
dims. Default: 4.0
normalize_by_wh (bool): Whether the tblr coordinates have been
normalized by the side length (wh) of prior bboxes.
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
        clip_border (bool, optional): Whether to clip objects outside the
border of the image. Defaults to True.
Return:
        decoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4)
"""
if not isinstance(normalizer, float):
normalizer = torch.tensor(normalizer, device=priors.device)
assert len(normalizer) == 4, 'Normalizer must have length = 4'
assert priors.size(0) == tblr.size(0)
if priors.ndim == 3:
assert priors.size(1) == tblr.size(1)
loc_decode = tblr * normalizer
prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2
if normalize_by_wh:
wh = priors[..., 2:4] - priors[..., 0:2]
w, h = torch.split(wh, 1, dim=-1)
        # In-place operations on slices would fail when exporting to ONNX
th = h * loc_decode[..., :2] # tb
tw = w * loc_decode[..., 2:] # lr
loc_decode = torch.cat([th, tw], dim=-1)
    # loc_decode.split(1, dim=-1) cannot be exported to ONNX; split by sizes
top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1)
xmin = prior_centers[..., 0].unsqueeze(-1) - left
xmax = prior_centers[..., 0].unsqueeze(-1) + right
ymin = prior_centers[..., 1].unsqueeze(-1) - top
ymax = prior_centers[..., 1].unsqueeze(-1) + bottom
bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
if clip_border and max_shape is not None:
# clip bboxes with dynamic `min` and `max` for onnx
if torch.onnx.is_in_onnx_export():
from mmdet.core.export import dynamic_clip_for_onnx
xmin, ymin, xmax, ymax = dynamic_clip_for_onnx(
xmin, ymin, xmax, ymax, max_shape)
bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1)
return bboxes
if not isinstance(max_shape, torch.Tensor):
max_shape = priors.new_tensor(max_shape)
max_shape = max_shape[..., :2].type_as(priors)
if max_shape.ndim == 2:
assert bboxes.ndim == 3
assert max_shape.size(0) == bboxes.size(0)
min_xy = priors.new_tensor(0)
max_xy = torch.cat([max_shape, max_shape],
dim=-1).flip(-1).unsqueeze(-2)
bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
return bboxes
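# --- Illustrative round-trip sketch (added for clarity, not part of the
# original module). Encoding gt boxes with ``bboxes2tblr`` and decoding the
# result with ``tblr2bboxes`` recovers the original coordinates, since no
# clipping happens while ``max_shape`` is None.
def _demo_tblr_round_trip():
    priors = torch.tensor([[0., 0., 10., 10.], [5., 5., 25., 35.]])
    gts = torch.tensor([[2., 3., 8., 9.], [6., 10., 20., 32.]])
    loc = bboxes2tblr(priors, gts)
    decoded = tblr2bboxes(priors, loc)
    assert torch.allclose(decoded, gts)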
| 9,724 | 41.467249 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/coders/legacy_delta_xywh_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import numpy as np
import torch
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder):
"""Legacy Delta XYWH BBox coder used in MMDet V1.x.
Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2,
y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh)
back to original bbox (x1, y1, x2, y2).
Note:
        The main difference between :class:`LegacyDeltaXYWHBBoxCoder` and
        :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width
        and height calculation. We suggest using this coder only when testing
        with MMDet V1.x models.
References:
.. [1] https://arxiv.org/abs/1311.2524
Args:
target_means (Sequence[float]): denormalizing means of target for
delta coordinates
target_stds (Sequence[float]): denormalizing standard deviation of
target for delta coordinates
"""
def __init__(self,
target_means: Sequence[float] = (0., 0., 0., 0.),
target_stds: Sequence[float] = (1., 1., 1., 1.),
**kwargs) -> None:
super().__init__(**kwargs)
self.means = target_means
self.stds = target_stds
def encode(self, bboxes: Union[Tensor, BaseBoxes],
gt_bboxes: Union[Tensor, BaseBoxes]) -> Tensor:
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): source boxes,
e.g., object proposals.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): target of the
transformation, e.g., ground-truth boxes.
Returns:
torch.Tensor: Box transformation deltas
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means,
self.stds)
return encoded_bboxes
def decode(
self,
bboxes: Union[Tensor, BaseBoxes],
pred_bboxes: Tensor,
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None,
wh_ratio_clip: Optional[float] = 16 / 1000
) -> Union[Tensor, BaseBoxes]:
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
            bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes.
            pred_bboxes (torch.Tensor): Encoded boxes with shape
                (N, 4) or (N, num_classes * 4).
max_shape (tuple[int], optional): Maximum shape of boxes.
Defaults to None.
wh_ratio_clip (float, optional): The allowed ratio between
width and height.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
assert pred_bboxes.size(0) == bboxes.size(0)
decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means,
self.stds, max_shape, wh_ratio_clip)
if self.use_box_type:
assert decoded_bboxes.size(-1) == 4, \
('Cannot warp decoded boxes with box type when decoded boxes'
'have shape of (N, num_classes * 4)')
decoded_bboxes = HorizontalBoxes(decoded_bboxes)
return decoded_bboxes
def legacy_bbox2delta(
proposals: Tensor,
gt: Tensor,
means: Sequence[float] = (0., 0., 0., 0.),
stds: Sequence[float] = (1., 1., 1., 1.)
) -> Tensor:
"""Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner.
We usually compute the deltas of x, y, w, h of proposals w.r.t ground
truth bboxes to get regression target.
This is the inverse function of `delta2bbox()`
Args:
proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
means (Sequence[float]): Denormalizing means for delta coordinates
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates
Returns:
Tensor: deltas with shape (N, 4), where columns represent dx, dy,
dw, dh.
"""
assert proposals.size() == gt.size()
proposals = proposals.float()
gt = gt.float()
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
pw = proposals[..., 2] - proposals[..., 0] + 1.0
ph = proposals[..., 3] - proposals[..., 1] + 1.0
gx = (gt[..., 0] + gt[..., 2]) * 0.5
gy = (gt[..., 1] + gt[..., 3]) * 0.5
gw = gt[..., 2] - gt[..., 0] + 1.0
gh = gt[..., 3] - gt[..., 1] + 1.0
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
deltas = torch.stack([dx, dy, dw, dh], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
def legacy_delta2bbox(rois: Tensor,
deltas: Tensor,
means: Sequence[float] = (0., 0., 0., 0.),
stds: Sequence[float] = (1., 1., 1., 1.),
max_shape: Optional[
Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None,
wh_ratio_clip: float = 16 / 1000) -> Tensor:
"""Apply deltas to shift/scale base boxes in the MMDet V1.x manner.
Typically the rois are anchor or proposed bounding boxes and the deltas are
network outputs used to shift/scale those boxes.
This is the inverse function of `bbox2delta()`
Args:
rois (Tensor): Boxes to be transformed. Has shape (N, 4)
deltas (Tensor): Encoded offsets with respect to each roi.
Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when
rois is a grid of anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates
        max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W).
wh_ratio_clip (float): Maximum aspect ratio for boxes.
Returns:
Tensor: Boxes with shape (N, 4), where columns represent
tl_x, tl_y, br_x, br_y.
References:
.. [1] https://arxiv.org/abs/1311.2524
Example:
>>> rois = torch.Tensor([[ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 5., 5., 5., 5.]])
>>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
>>> [ 1., 1., 1., 1.],
>>> [ 0., 0., 2., -1.],
>>> [ 0.7, -1.9, -0.5, 0.3]])
>>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32))
tensor([[0.0000, 0.0000, 1.5000, 1.5000],
[0.0000, 0.0000, 5.2183, 5.2183],
[0.0000, 0.1321, 7.8891, 0.8679],
[5.3967, 2.4251, 6.0033, 3.7749]])
"""
means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)
stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)
denorm_deltas = deltas * stds + means
dx = denorm_deltas[:, 0::4]
dy = denorm_deltas[:, 1::4]
dw = denorm_deltas[:, 2::4]
dh = denorm_deltas[:, 3::4]
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
# Compute center of each roi
px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
# Compute width/height of each roi
pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)
ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)
# Use exp(network energy) to enlarge/shrink each roi
gw = pw * dw.exp()
gh = ph * dh.exp()
# Use network energy to shift the center of each roi
gx = px + pw * dx
gy = py + ph * dy
# Convert center-xy/width/height to top-left, bottom-right
# The true legacy box coder should +- 0.5 here.
# However, current implementation improves the performance when testing
# the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP)
x1 = gx - gw * 0.5
y1 = gy - gh * 0.5
x2 = gx + gw * 0.5
y2 = gy + gh * 0.5
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
return bboxes
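# --- Illustrative sketch (added for clarity, not part of the original
# module). Because ``legacy_bbox2delta`` uses the ``+ 1`` width/height
# convention while ``legacy_delta2bbox`` deliberately omits the matching
# ``+/- 0.5`` correction (see the comment above), decoding the encoded gt
# returns the gt box grown by 0.5 pixel on every side rather than an exact
# round trip.
def _demo_legacy_round_trip():
    rois = torch.tensor([[0., 0., 10., 10.]])
    gt = torch.tensor([[2., 3., 8., 9.]])
    deltas = legacy_bbox2delta(rois, gt)
    decoded = legacy_delta2bbox(rois, deltas)
    expected = torch.tensor([[1.5, 2.5, 8.5, 9.5]])  # gt expanded by 0.5
    assert torch.allclose(decoded, expected)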
| 9,370 | 38.707627 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/coders/delta_xywh_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Optional, Sequence, Union
import numpy as np
import torch
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DeltaXYWHBBoxCoder(BaseBBoxCoder):
"""Delta XYWH BBox coder.
Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_,
this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and
decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).
Args:
target_means (Sequence[float]): Denormalizing means of target for
delta coordinates
target_stds (Sequence[float]): Denormalizing standard deviation of
target for delta coordinates
        clip_border (bool, optional): Whether to clip objects outside the
border of the image. Defaults to True.
        add_ctr_clamp (bool): Whether to add center clamp. When added, the
            predicted box is clamped if its center is too far away from
            the original anchor's center. Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
"""
def __init__(self,
target_means: Sequence[float] = (0., 0., 0., 0.),
target_stds: Sequence[float] = (1., 1., 1., 1.),
clip_border: bool = True,
add_ctr_clamp: bool = False,
ctr_clamp: int = 32,
**kwargs) -> None:
super().__init__(**kwargs)
self.means = target_means
self.stds = target_stds
self.clip_border = clip_border
self.add_ctr_clamp = add_ctr_clamp
self.ctr_clamp = ctr_clamp
def encode(self, bboxes: Union[Tensor, BaseBoxes],
gt_bboxes: Union[Tensor, BaseBoxes]) -> Tensor:
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,
e.g., object proposals.
gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the
transformation, e.g., ground-truth boxes.
Returns:
torch.Tensor: Box transformation deltas
"""
bboxes = get_box_tensor(bboxes)
gt_bboxes = get_box_tensor(gt_bboxes)
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds)
return encoded_bboxes
def decode(
self,
bboxes: Union[Tensor, BaseBoxes],
pred_bboxes: Tensor,
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None,
wh_ratio_clip: Optional[float] = 16 / 1000
) -> Union[Tensor, BaseBoxes]:
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes. Shape
(B, N, 4) or (N, 4)
pred_bboxes (Tensor): Encoded offsets with respect to each roi.
Has shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
                when rois is a grid of anchors. Offset encoding follows [1]_.
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
wh_ratio_clip (float, optional): The allowed ratio between
width and height.
Returns:
Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
"""
bboxes = get_box_tensor(bboxes)
assert pred_bboxes.size(0) == bboxes.size(0)
if pred_bboxes.ndim == 3:
assert pred_bboxes.size(1) == bboxes.size(1)
if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export():
# single image decode
decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means,
self.stds, max_shape, wh_ratio_clip,
self.clip_border, self.add_ctr_clamp,
self.ctr_clamp)
else:
if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export():
warnings.warn(
'DeprecationWarning: onnx_delta2bbox is deprecated '
'in the case of batch decoding and non-ONNX, '
'please use “delta2bbox” instead. In order to improve '
'the decoding speed, the batch function will no '
'longer be supported. ')
decoded_bboxes = onnx_delta2bbox(bboxes, pred_bboxes, self.means,
self.stds, max_shape,
wh_ratio_clip, self.clip_border,
self.add_ctr_clamp,
self.ctr_clamp)
if self.use_box_type:
assert decoded_bboxes.size(-1) == 4, \
('Cannot warp decoded boxes with box type when decoded boxes'
'have shape of (N, num_classes * 4)')
decoded_bboxes = HorizontalBoxes(decoded_bboxes)
return decoded_bboxes
def bbox2delta(
proposals: Tensor,
gt: Tensor,
means: Sequence[float] = (0., 0., 0., 0.),
stds: Sequence[float] = (1., 1., 1., 1.)
) -> Tensor:
"""Compute deltas of proposals w.r.t. gt.
We usually compute the deltas of x, y, w, h of proposals w.r.t ground
truth bboxes to get regression target.
This is the inverse function of :func:`delta2bbox`.
Args:
proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
means (Sequence[float]): Denormalizing means for delta coordinates
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates
Returns:
Tensor: deltas with shape (N, 4), where columns represent dx, dy,
dw, dh.
"""
assert proposals.size() == gt.size()
proposals = proposals.float()
gt = gt.float()
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
pw = proposals[..., 2] - proposals[..., 0]
ph = proposals[..., 3] - proposals[..., 1]
gx = (gt[..., 0] + gt[..., 2]) * 0.5
gy = (gt[..., 1] + gt[..., 3]) * 0.5
gw = gt[..., 2] - gt[..., 0]
gh = gt[..., 3] - gt[..., 1]
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
deltas = torch.stack([dx, dy, dw, dh], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
def delta2bbox(rois: Tensor,
deltas: Tensor,
means: Sequence[float] = (0., 0., 0., 0.),
stds: Sequence[float] = (1., 1., 1., 1.),
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None,
wh_ratio_clip: float = 16 / 1000,
clip_border: bool = True,
add_ctr_clamp: bool = False,
ctr_clamp: int = 32) -> Tensor:
"""Apply deltas to shift/scale base boxes.
Typically the rois are anchor or proposed bounding boxes and the deltas are
network outputs used to shift/scale those boxes.
This is the inverse function of :func:`bbox2delta`.
Args:
rois (Tensor): Boxes to be transformed. Has shape (N, 4).
deltas (Tensor): Encoded offsets relative to each roi.
Has shape (N, num_classes * 4) or (N, 4). Note
N = num_base_anchors * W * H, when rois is a grid of
anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates.
Default (0., 0., 0., 0.).
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates. Default (1., 1., 1., 1.).
max_shape (tuple[int, int]): Maximum bounds for boxes, specifies
(H, W). Default None.
wh_ratio_clip (float): Maximum aspect ratio for boxes. Default
16 / 1000.
        clip_border (bool, optional): Whether to clip objects outside the
border of the image. Default True.
add_ctr_clamp (bool): Whether to add center clamp. When set to True,
the center of the prediction bounding box will be clamped to
avoid being too far away from the center of the anchor.
Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
Returns:
Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4
represent tl_x, tl_y, br_x, br_y.
References:
.. [1] https://arxiv.org/abs/1311.2524
Example:
>>> rois = torch.Tensor([[ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 5., 5., 5., 5.]])
>>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
>>> [ 1., 1., 1., 1.],
>>> [ 0., 0., 2., -1.],
>>> [ 0.7, -1.9, -0.5, 0.3]])
>>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))
tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
"""
num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4
if num_bboxes == 0:
return deltas
deltas = deltas.reshape(-1, 4)
means = deltas.new_tensor(means).view(1, -1)
stds = deltas.new_tensor(stds).view(1, -1)
denorm_deltas = deltas * stds + means
dxy = denorm_deltas[:, :2]
dwh = denorm_deltas[:, 2:]
# Compute width/height of each roi
rois_ = rois.repeat(1, num_classes).reshape(-1, 4)
pxy = ((rois_[:, :2] + rois_[:, 2:]) * 0.5)
pwh = (rois_[:, 2:] - rois_[:, :2])
dxy_wh = pwh * dxy
max_ratio = np.abs(np.log(wh_ratio_clip))
if add_ctr_clamp:
dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp)
dwh = torch.clamp(dwh, max=max_ratio)
else:
dwh = dwh.clamp(min=-max_ratio, max=max_ratio)
gxy = pxy + dxy_wh
gwh = pwh * dwh.exp()
x1y1 = gxy - (gwh * 0.5)
x2y2 = gxy + (gwh * 0.5)
bboxes = torch.cat([x1y1, x2y2], dim=-1)
if clip_border and max_shape is not None:
bboxes[..., 0::2].clamp_(min=0, max=max_shape[1])
bboxes[..., 1::2].clamp_(min=0, max=max_shape[0])
bboxes = bboxes.reshape(num_bboxes, -1)
return bboxes
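# --- Illustrative round-trip sketch (added for clarity, not part of the
# original module). With the default means/stds and no clipping,
# ``delta2bbox`` exactly inverts ``bbox2delta``.
def _demo_delta_round_trip():
    rois = torch.tensor([[0., 0., 10., 10.], [5., 5., 25., 35.]])
    gt = torch.tensor([[2., 3., 8., 9.], [6., 10., 20., 32.]])
    deltas = bbox2delta(rois, gt)
    decoded = delta2bbox(rois, deltas)
    assert torch.allclose(decoded, gt, atol=1e-5)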
def onnx_delta2bbox(rois: Tensor,
deltas: Tensor,
means: Sequence[float] = (0., 0., 0., 0.),
stds: Sequence[float] = (1., 1., 1., 1.),
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None,
wh_ratio_clip: float = 16 / 1000,
clip_border: Optional[bool] = True,
add_ctr_clamp: bool = False,
ctr_clamp: int = 32) -> Tensor:
"""Apply deltas to shift/scale base boxes.
Typically the rois are anchor or proposed bounding boxes and the deltas are
network outputs used to shift/scale those boxes.
This is the inverse function of :func:`bbox2delta`.
Args:
rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)
deltas (Tensor): Encoded offsets with respect to each roi.
Has shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
            when rois is a grid of anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates.
Default (0., 0., 0., 0.).
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates. Default (1., 1., 1., 1.).
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If rois shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B. Default None.
wh_ratio_clip (float): Maximum aspect ratio for boxes.
Default 16 / 1000.
        clip_border (bool, optional): Whether to clip objects outside the
border of the image. Default True.
        add_ctr_clamp (bool): Whether to add center clamp. When added, the
            predicted box is clamped if its center is too far away from
            the original anchor's center. Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
Returns:
Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4), where 4 represent
tl_x, tl_y, br_x, br_y.
References:
.. [1] https://arxiv.org/abs/1311.2524
Example:
>>> rois = torch.Tensor([[ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 5., 5., 5., 5.]])
>>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
>>> [ 1., 1., 1., 1.],
>>> [ 0., 0., 2., -1.],
>>> [ 0.7, -1.9, -0.5, 0.3]])
>>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))
tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
"""
means = deltas.new_tensor(means).view(1,
-1).repeat(1,
deltas.size(-1) // 4)
stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)
denorm_deltas = deltas * stds + means
dx = denorm_deltas[..., 0::4]
dy = denorm_deltas[..., 1::4]
dw = denorm_deltas[..., 2::4]
dh = denorm_deltas[..., 3::4]
x1, y1 = rois[..., 0], rois[..., 1]
x2, y2 = rois[..., 2], rois[..., 3]
# Compute center of each roi
px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)
py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)
# Compute width/height of each roi
pw = (x2 - x1).unsqueeze(-1).expand_as(dw)
ph = (y2 - y1).unsqueeze(-1).expand_as(dh)
dx_width = pw * dx
dy_height = ph * dy
max_ratio = np.abs(np.log(wh_ratio_clip))
if add_ctr_clamp:
dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp)
dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp)
dw = torch.clamp(dw, max=max_ratio)
dh = torch.clamp(dh, max=max_ratio)
else:
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
# Use exp(network energy) to enlarge/shrink each roi
gw = pw * dw.exp()
gh = ph * dh.exp()
# Use network energy to shift the center of each roi
gx = px + dx_width
gy = py + dy_height
# Convert center-xy/width/height to top-left, bottom-right
x1 = gx - gw * 0.5
y1 = gy - gh * 0.5
x2 = gx + gw * 0.5
y2 = gy + gh * 0.5
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
if clip_border and max_shape is not None:
# clip bboxes with dynamic `min` and `max` for onnx
if torch.onnx.is_in_onnx_export():
from mmdet.core.export import dynamic_clip_for_onnx
x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
return bboxes
if not isinstance(max_shape, torch.Tensor):
max_shape = x1.new_tensor(max_shape)
max_shape = max_shape[..., :2].type_as(x1)
if max_shape.ndim == 2:
assert bboxes.ndim == 3
assert max_shape.size(0) == bboxes.size(0)
min_xy = x1.new_tensor(0)
max_xy = torch.cat(
[max_shape] * (deltas.size(-1) // 2),
dim=-1).flip(-1).unsqueeze(-2)
bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
return bboxes
| 17,417 | 41.174334 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/task_modules/coders/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_bbox_coder import BaseBBoxCoder
from .bucketing_bbox_coder import BucketingBBoxCoder
from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
from .distance_point_bbox_coder import DistancePointBBoxCoder
from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
from .pseudo_bbox_coder import PseudoBBoxCoder
from .tblr_bbox_coder import TBLRBBoxCoder
from .yolo_bbox_coder import YOLOBBoxCoder
__all__ = [
'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
'BucketingBBoxCoder', 'DistancePointBBoxCoder'
]
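# --- Illustrative usage sketch (added for clarity, not part of the original
# package __init__). In configs, coders are usually built from dicts through
# the TASK_UTILS registry instead of being imported directly; the std values
# below are illustrative assumptions.
def _demo_build_coder():
    from mmdet.registry import TASK_UTILS
    coder = TASK_UTILS.build(
        dict(
            type='DeltaXYWHBBoxCoder',
            target_means=(0., 0., 0., 0.),
            target_stds=(0.1, 0.1, 0.2, 0.2)))
    assert isinstance(coder, DeltaXYWHBBoxCoder)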
| 654 | 39.9375 | 66 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/standard_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures import DetDataSample, SampleList
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import ConfigType, InstanceList
from ..task_modules.samplers import SamplingResult
from ..utils import empty_instances, unpack_gt_instances
from .base_roi_head import BaseRoIHead
@MODELS.register_module()
class StandardRoIHead(BaseRoIHead):
"""Simplest base roi head including one bbox head and one mask head."""
def init_assigner_sampler(self) -> None:
"""Initialize assigner and sampler."""
self.bbox_assigner = None
self.bbox_sampler = None
if self.train_cfg:
self.bbox_assigner = TASK_UTILS.build(self.train_cfg.assigner)
self.bbox_sampler = TASK_UTILS.build(
self.train_cfg.sampler, default_args=dict(context=self))
def init_bbox_head(self, bbox_roi_extractor: ConfigType,
bbox_head: ConfigType) -> None:
"""Initialize box head and box roi extractor.
Args:
bbox_roi_extractor (dict or ConfigDict): Config of box
roi extractor.
bbox_head (dict or ConfigDict): Config of box in box head.
"""
self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor)
self.bbox_head = MODELS.build(bbox_head)
def init_mask_head(self, mask_roi_extractor: ConfigType,
mask_head: ConfigType) -> None:
"""Initialize mask head and mask roi extractor.
Args:
mask_roi_extractor (dict or ConfigDict): Config of mask roi
extractor.
mask_head (dict or ConfigDict): Config of mask in mask head.
"""
if mask_roi_extractor is not None:
self.mask_roi_extractor = MODELS.build(mask_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.mask_head = MODELS.build(mask_head)
# TODO: Need to refactor later
def forward(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList = None) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
x (List[Tensor]): Multi-level features that may have different
resolutions.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
        Returns:
tuple: A tuple of features from ``bbox_head`` and ``mask_head``
forward.
"""
results = ()
proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]
rois = bbox2roi(proposals)
# bbox head
if self.with_bbox:
bbox_results = self._bbox_forward(x, rois)
results = results + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask head
if self.with_mask:
mask_rois = rois[:100]
mask_results = self._mask_forward(x, mask_rois)
results = results + (mask_results['mask_preds'], )
return results
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: List[DetDataSample]) -> dict:
"""Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components
"""
assert len(rpn_results_list) == len(batch_data_samples)
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, _ = outputs
# assign gts and sample proposals
num_imgs = len(batch_data_samples)
sampling_results = []
for i in range(num_imgs):
# rename rpn_results.bboxes to rpn_results.priors
rpn_results = rpn_results_list[i]
rpn_results.priors = rpn_results.pop('bboxes')
assign_result = self.bbox_assigner.assign(
rpn_results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
rpn_results,
batch_gt_instances[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
losses = dict()
# bbox head loss
if self.with_bbox:
bbox_results = self.bbox_loss(x, sampling_results)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self.mask_loss(x, sampling_results,
bbox_results['bbox_feats'],
batch_gt_instances)
losses.update(mask_results['loss_mask'])
return losses
def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:
"""Box head forward function used in both training and testing.
Args:
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
"""
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
return bbox_results
def bbox_loss(self, x: Tuple[Tensor],
sampling_results: List[SamplingResult]) -> dict:
"""Perform forward propagation and loss calculation of the bbox head on
the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
sampling_results (list["obj:`SamplingResult`]): Sampling results.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
"""
rois = bbox2roi([res.priors for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
bbox_loss_and_target = self.bbox_head.loss_and_target(
cls_score=bbox_results['cls_score'],
bbox_pred=bbox_results['bbox_pred'],
rois=rois,
sampling_results=sampling_results,
rcnn_train_cfg=self.train_cfg)
bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])
return bbox_results
def mask_loss(self, x: Tuple[Tensor],
sampling_results: List[SamplingResult], bbox_feats: Tensor,
batch_gt_instances: InstanceList) -> dict:
"""Perform forward propagation and loss calculation of the mask head on
the features of the upstream network.
Args:
x (tuple[Tensor]): Tuple of multi-level img features.
sampling_results (list["obj:`SamplingResult`]): Sampling results.
bbox_feats (Tensor): Extract bbox RoI features.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
- `mask_feats` (Tensor): Extract mask RoI features.
- `mask_targets` (Tensor): Mask target of each positive\
proposals in the image.
- `loss_mask` (dict): A dictionary of mask loss components.
"""
if not self.share_roi_extractor:
pos_rois = bbox2roi([res.pos_priors for res in sampling_results])
mask_results = self._mask_forward(x, pos_rois)
else:
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_priors.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_priors.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_results = self._mask_forward(
x, pos_inds=pos_inds, bbox_feats=bbox_feats)
mask_loss_and_target = self.mask_head.loss_and_target(
mask_preds=mask_results['mask_preds'],
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
rcnn_train_cfg=self.train_cfg)
mask_results.update(loss_mask=mask_loss_and_target['loss_mask'])
return mask_results
def _mask_forward(self,
x: Tuple[Tensor],
rois: Tensor = None,
pos_inds: Optional[Tensor] = None,
bbox_feats: Optional[Tensor] = None) -> dict:
"""Mask head forward function used in both training and testing.
Args:
x (tuple[Tensor]): Tuple of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
pos_inds (Tensor, optional): Indices of positive samples.
Defaults to None.
bbox_feats (Tensor): Extract bbox RoI features. Defaults to None.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
- `mask_feats` (Tensor): Extract mask RoI features.
"""
assert ((rois is not None) ^
(pos_inds is not None and bbox_feats is not None))
if rois is not None:
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
assert bbox_feats is not None
mask_feats = bbox_feats[pos_inds]
mask_preds = self.mask_head(mask_feats)
mask_results = dict(mask_preds=mask_preds, mask_feats=mask_feats)
return mask_results
def predict_bbox(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
rpn_results_list: InstanceList,
rcnn_test_cfg: ConfigType,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the bbox head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
proposals = [res.bboxes for res in rpn_results_list]
rois = bbox2roi(proposals)
if rois.shape[0] == 0:
return empty_instances(
batch_img_metas,
rois.device,
task_type='bbox',
box_type=self.bbox_head.predict_box_type,
num_classes=self.bbox_head.num_classes,
score_per_cls=rcnn_test_cfg is None)
bbox_results = self._bbox_forward(x, rois)
# split batch bbox prediction back to each image
cls_scores = bbox_results['cls_score']
bbox_preds = bbox_results['bbox_pred']
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = rois.split(num_proposals_per_img, 0)
cls_scores = cls_scores.split(num_proposals_per_img, 0)
# some detector with_reg is False, bbox_preds will be None
if bbox_preds is not None:
# TODO move this to a sabl_roi_head
# the bbox prediction of some detectors like SABL is not Tensor
if isinstance(bbox_preds, torch.Tensor):
bbox_preds = bbox_preds.split(num_proposals_per_img, 0)
else:
bbox_preds = self.bbox_head.bbox_pred_split(
bbox_preds, num_proposals_per_img)
else:
bbox_preds = (None, ) * len(proposals)
result_list = self.bbox_head.predict_by_feat(
rois=rois,
cls_scores=cls_scores,
bbox_preds=bbox_preds,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=rcnn_test_cfg,
rescale=rescale)
return result_list
def predict_mask(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
results_list: InstanceList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the mask head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
# don't need to consider aug_test.
bboxes = [res.bboxes for res in results_list]
mask_rois = bbox2roi(bboxes)
if mask_rois.shape[0] == 0:
results_list = empty_instances(
batch_img_metas,
mask_rois.device,
task_type='mask',
instance_results=results_list,
mask_thr_binary=self.test_cfg.mask_thr_binary)
return results_list
mask_results = self._mask_forward(x, mask_rois)
mask_preds = mask_results['mask_preds']
# split batch mask prediction back to each image
num_mask_rois_per_img = [len(res) for res in results_list]
mask_preds = mask_preds.split(num_mask_rois_per_img, 0)
# TODO: Handle the case where rescale is false
results_list = self.mask_head.predict_by_feat(
mask_preds=mask_preds,
results_list=results_list,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=self.test_cfg,
rescale=rescale)
return results_list
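# --- Illustrative configuration sketch (added for clarity, not part of the
# original module). A typical StandardRoIHead config as used by two-stage
# detectors such as Faster R-CNN; all values follow common defaults and are
# assumptions rather than requirements of this class. It can be instantiated
# with ``MODELS.build`` together with ``train_cfg``/``test_cfg``.
_EXAMPLE_STANDARD_ROI_HEAD_CFG = dict(
    type='StandardRoIHead',
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='Shared2FCBBoxHead',
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=80,
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[0., 0., 0., 0.],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)))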
| 17,440 | 40.52619 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/grid_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import ConfigType, InstanceList
from ..task_modules.samplers import SamplingResult
from ..utils.misc import unpack_gt_instances
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class GridRoIHead(StandardRoIHead):
"""Implementation of `Grid RoI Head <https://arxiv.org/abs/1811.12030>`_
Args:
grid_roi_extractor (:obj:`ConfigDict` or dict): Config of
roi extractor.
grid_head (:obj:`ConfigDict` or dict): Config of grid head
"""
def __init__(self, grid_roi_extractor: ConfigType, grid_head: ConfigType,
**kwargs) -> None:
assert grid_head is not None
super().__init__(**kwargs)
if grid_roi_extractor is not None:
self.grid_roi_extractor = MODELS.build(grid_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.grid_roi_extractor = self.bbox_roi_extractor
self.grid_head = MODELS.build(grid_head)
def _random_jitter(self,
sampling_results: List[SamplingResult],
batch_img_metas: List[dict],
amplitude: float = 0.15) -> List[SamplingResult]:
"""Ramdom jitter positive proposals for training.
Args:
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
batch_img_metas (list[dict]): List of image information.
amplitude (float): Amplitude of random offset. Defaults to 0.15.
Returns:
list[obj:SamplingResult]: SamplingResults after random jittering.
"""
for sampling_result, img_meta in zip(sampling_results,
batch_img_metas):
bboxes = sampling_result.pos_priors
random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(
-amplitude, amplitude)
# before jittering
cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
# after jittering
new_cxcy = cxcy + wh * random_offsets[:, :2]
new_wh = wh * (1 + random_offsets[:, 2:])
# xywh to xyxy
new_x1y1 = (new_cxcy - new_wh / 2)
new_x2y2 = (new_cxcy + new_wh / 2)
new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)
# clip bboxes
max_shape = img_meta['img_shape']
if max_shape is not None:
new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)
new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)
sampling_result.pos_priors = new_bboxes
return sampling_results
# TODO: Forward is incorrect and need to refactor.
def forward(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList = None) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
x (Tuple[Tensor]): Multi-level features that may have different
resolutions.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
        Returns:
tuple: A tuple of features from ``bbox_head`` and ``mask_head``
forward.
"""
results = ()
proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]
rois = bbox2roi(proposals)
# bbox head
if self.with_bbox:
bbox_results = self._bbox_forward(x, rois)
results = results + (bbox_results['cls_score'], )
if self.bbox_head.with_reg:
results = results + (bbox_results['bbox_pred'], )
# grid head
grid_rois = rois[:100]
grid_feats = self.grid_roi_extractor(
x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)
if self.with_shared_head:
grid_feats = self.shared_head(grid_feats)
self.grid_head.test_mode = True
grid_preds = self.grid_head(grid_feats)
results = results + (grid_preds, )
# mask head
if self.with_mask:
mask_rois = rois[:100]
mask_results = self._mask_forward(x, mask_rois)
results = results + (mask_results['mask_preds'], )
return results
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList, **kwargs) -> dict:
"""Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components
"""
assert len(rpn_results_list) == len(batch_data_samples)
outputs = unpack_gt_instances(batch_data_samples)
(batch_gt_instances, batch_gt_instances_ignore,
batch_img_metas) = outputs
# assign gts and sample proposals
num_imgs = len(batch_data_samples)
sampling_results = []
for i in range(num_imgs):
# rename rpn_results.bboxes to rpn_results.priors
rpn_results = rpn_results_list[i]
rpn_results.priors = rpn_results.pop('bboxes')
assign_result = self.bbox_assigner.assign(
rpn_results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
rpn_results,
batch_gt_instances[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
losses = dict()
# bbox head loss
if self.with_bbox:
bbox_results = self.bbox_loss(x, sampling_results, batch_img_metas)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self.mask_loss(x, sampling_results,
bbox_results['bbox_feats'],
batch_gt_instances)
losses.update(mask_results['loss_mask'])
return losses
def bbox_loss(self,
x: Tuple[Tensor],
sampling_results: List[SamplingResult],
batch_img_metas: Optional[List[dict]] = None) -> dict:
"""Perform forward propagation and loss calculation of the bbox head on
the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
sampling_results (list[:obj:`SamplingResult`]): Sampling results.
batch_img_metas (list[dict], optional): Meta information of each
image, e.g., image size, scaling factor, etc.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
"""
assert batch_img_metas is not None
bbox_results = super().bbox_loss(x, sampling_results)
# Grid head forward and loss
sampling_results = self._random_jitter(sampling_results,
batch_img_metas)
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
# GN in head does not support zero shape input
if pos_rois.shape[0] == 0:
return bbox_results
grid_feats = self.grid_roi_extractor(
x[:self.grid_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
grid_feats = self.shared_head(grid_feats)
# Accelerate training
max_sample_num_grid = self.train_cfg.get('max_num_grid', 192)
sample_idx = torch.randperm(
grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid
)]
grid_feats = grid_feats[sample_idx]
grid_pred = self.grid_head(grid_feats)
loss_grid = self.grid_head.loss(grid_pred, sample_idx,
sampling_results, self.train_cfg)
bbox_results['loss_bbox'].update(loss_grid)
return bbox_results
def predict_bbox(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
rpn_results_list: InstanceList,
rcnn_test_cfg: ConfigType,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the bbox head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
rcnn_test_cfg (:obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape \
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4), the last \
dimension 4 arrange as (x1, y1, x2, y2).
"""
results_list = super().predict_bbox(
x,
batch_img_metas=batch_img_metas,
rpn_results_list=rpn_results_list,
rcnn_test_cfg=rcnn_test_cfg,
rescale=False)
grid_rois = bbox2roi([res.bboxes for res in results_list])
if grid_rois.shape[0] != 0:
grid_feats = self.grid_roi_extractor(
x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)
if self.with_shared_head:
grid_feats = self.shared_head(grid_feats)
self.grid_head.test_mode = True
grid_preds = self.grid_head(grid_feats)
results_list = self.grid_head.predict_by_feat(
grid_preds=grid_preds,
results_list=results_list,
batch_img_metas=batch_img_metas,
rescale=rescale)
return results_list
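# --- Illustrative sketch (added for clarity, not part of the original
# module). It replays the arithmetic of ``GridRoIHead._random_jitter`` on a
# single proposal with a fixed offset instead of a random one (the default
# amplitude is 0.15); the numbers are assumptions for illustration only.
def _demo_grid_jitter():
    bbox = torch.tensor([[10., 10., 30., 50.]])  # cxcy = (20, 30), wh = (20, 40)
    offsets = torch.tensor([[0.1, -0.05, 0.1, 0.05]])  # all within +/- 0.15
    cxcy = (bbox[:, 2:4] + bbox[:, :2]) / 2
    wh = (bbox[:, 2:4] - bbox[:, :2]).abs()
    new_cxcy = cxcy + wh * offsets[:, :2]  # center shifted by a fraction of wh
    new_wh = wh * (1 + offsets[:, 2:])  # width/height rescaled
    new_bbox = torch.cat([new_cxcy - new_wh / 2, new_cxcy + new_wh / 2], dim=1)
    assert torch.allclose(new_bbox, torch.tensor([[11., 7., 33., 49.]]))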
| 11,647 | 40.451957 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/scnet_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import ConfigType, InstanceList, OptConfigType
from ..layers import adaptive_avg_pool2d
from ..task_modules.samplers import SamplingResult
from ..utils import empty_instances, unpack_gt_instances
from .cascade_roi_head import CascadeRoIHead
@MODELS.register_module()
class SCNetRoIHead(CascadeRoIHead):
"""RoIHead for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
num_stages (int): number of cascade stages.
stage_loss_weights (list): loss weight of cascade stages.
semantic_roi_extractor (dict): config to init semantic roi extractor.
semantic_head (dict): config to init semantic head.
feat_relay_head (dict): config to init feature_relay_head.
glbctx_head (dict): config to init global context head.
"""
def __init__(self,
num_stages: int,
stage_loss_weights: List[float],
semantic_roi_extractor: OptConfigType = None,
semantic_head: OptConfigType = None,
feat_relay_head: OptConfigType = None,
glbctx_head: OptConfigType = None,
**kwargs) -> None:
super().__init__(
num_stages=num_stages,
stage_loss_weights=stage_loss_weights,
**kwargs)
assert self.with_bbox and self.with_mask
assert not self.with_shared_head # shared head is not supported
if semantic_head is not None:
self.semantic_roi_extractor = MODELS.build(semantic_roi_extractor)
self.semantic_head = MODELS.build(semantic_head)
if feat_relay_head is not None:
self.feat_relay_head = MODELS.build(feat_relay_head)
if glbctx_head is not None:
self.glbctx_head = MODELS.build(glbctx_head)
def init_mask_head(self, mask_roi_extractor: ConfigType,
mask_head: ConfigType) -> None:
"""Initialize ``mask_head``"""
if mask_roi_extractor is not None:
self.mask_roi_extractor = MODELS.build(mask_roi_extractor)
self.mask_head = MODELS.build(mask_head)
# TODO move to base_roi_head later
@property
def with_semantic(self) -> bool:
"""bool: whether the head has semantic head"""
return hasattr(self,
'semantic_head') and self.semantic_head is not None
@property
def with_feat_relay(self) -> bool:
"""bool: whether the head has feature relay head"""
return (hasattr(self, 'feat_relay_head')
and self.feat_relay_head is not None)
@property
def with_glbctx(self) -> bool:
"""bool: whether the head has global context head"""
return hasattr(self, 'glbctx_head') and self.glbctx_head is not None
def _fuse_glbctx(self, roi_feats: Tensor, glbctx_feat: Tensor,
rois: Tensor) -> Tensor:
"""Fuse global context feats with roi feats.
Args:
roi_feats (Tensor): RoI features.
glbctx_feat (Tensor): Global context feature.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
Tensor: Fused feature.
"""
assert roi_feats.size(0) == rois.size(0)
# RuntimeError: isDifferentiableType(variable.scalar_type())
# INTERNAL ASSERT FAILED if detach() is not used when calling
# roi_head.predict().
img_inds = torch.unique(rois[:, 0].detach().cpu(), sorted=True).long()
fused_feats = torch.zeros_like(roi_feats)
for img_id in img_inds:
inds = (rois[:, 0] == img_id.item())
fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id]
return fused_feats
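# Minimal illustration (hypothetical shapes) of the fusion above: every RoI
# belonging to image ``i`` receives the same per-image global context vector,
#     roi_feats:   (num_rois, C, H, W)
#     glbctx_feat: (num_imgs, C, ...)   broadcastable over an RoI feature map
#     fused_feats[j] = roi_feats[j] + glbctx_feat[int(rois[j, 0])]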
def _slice_pos_feats(self, feats: Tensor,
sampling_results: List[SamplingResult]) -> Tensor:
"""Get features from pos rois.
Args:
feats (Tensor): Input features.
sampling_results (list[:obj:`SamplingResult`]): Sampling results.
Returns:
Tensor: Sliced features.
"""
num_rois = [res.priors.size(0) for res in sampling_results]
num_pos_rois = [res.pos_priors.size(0) for res in sampling_results]
inds = torch.zeros(sum(num_rois), dtype=torch.bool)
start = 0
for i in range(len(num_rois)):
start = 0 if i == 0 else start + num_rois[i - 1]
stop = start + num_pos_rois[i]
inds[start:stop] = 1
sliced_feats = feats[inds]
return sliced_feats
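# Worked example (illustrative only): positive RoIs come first within each
# image's block, so with num_rois = [5, 4] and num_pos_rois = [2, 1] the
# boolean mask built above is
#     [1, 1, 0, 0, 0, 1, 0, 0, 0]
# i.e. the first two RoIs of image 0 and the first RoI of image 1 are kept.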
def _bbox_forward(self,
stage: int,
x: Tuple[Tensor],
rois: Tensor,
semantic_feat: Optional[Tensor] = None,
glbctx_feat: Optional[Tensor] = None) -> dict:
"""Box head forward function used in both training and testing.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
semantic_feat (Tensor): Semantic feature. Defaults to None.
glbctx_feat (Tensor): Global context feature. Defaults to None.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
"""
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
if self.with_semantic and semantic_feat is not None:
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
bbox_semantic_feat = adaptive_avg_pool2d(
bbox_semantic_feat, bbox_feats.shape[-2:])
bbox_feats += bbox_semantic_feat
if self.with_glbctx and glbctx_feat is not None:
bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois)
cls_score, bbox_pred, relayed_feat = bbox_head(
bbox_feats, return_shared_feat=True)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
relayed_feat=relayed_feat)
return bbox_results
def _mask_forward(self,
x: Tuple[Tensor],
rois: Tensor,
semantic_feat: Optional[Tensor] = None,
glbctx_feat: Optional[Tensor] = None,
relayed_feat: Optional[Tensor] = None) -> dict:
"""Mask head forward function used in both training and testing.
Args:
x (tuple[Tensor]): Tuple of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
semantic_feat (Tensor): Semantic feature. Defaults to None.
glbctx_feat (Tensor): Global context feature. Defaults to None.
relayed_feat (Tensor): Relayed feature. Defaults to None.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
"""
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], rois)
if self.with_semantic and semantic_feat is not None:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats += mask_semantic_feat
if self.with_glbctx and glbctx_feat is not None:
mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois)
if self.with_feat_relay and relayed_feat is not None:
mask_feats = mask_feats + relayed_feat
mask_preds = self.mask_head(mask_feats)
mask_results = dict(mask_preds=mask_preds)
return mask_results
def bbox_loss(self,
stage: int,
x: Tuple[Tensor],
sampling_results: List[SamplingResult],
semantic_feat: Optional[Tensor] = None,
glbctx_feat: Optional[Tensor] = None) -> dict:
"""Run forward function and calculate loss for box head in training.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): List of multi-level img features.
sampling_results (list[:obj:`SamplingResult`]): Sampling results.
semantic_feat (Tensor): Semantic feature. Defaults to None.
glbctx_feat (Tensor): Global context feature. Defaults to None.
Returns:
dict: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
- `rois` (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
- `bbox_targets` (tuple): Ground truth for proposals in a
single image. Containing the following list of Tensors:
(labels, label_weights, bbox_targets, bbox_weights)
"""
bbox_head = self.bbox_head[stage]
rois = bbox2roi([res.priors for res in sampling_results])
bbox_results = self._bbox_forward(
stage,
x,
rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat)
bbox_results.update(rois=rois)
bbox_loss_and_target = bbox_head.loss_and_target(
cls_score=bbox_results['cls_score'],
bbox_pred=bbox_results['bbox_pred'],
rois=rois,
sampling_results=sampling_results,
rcnn_train_cfg=self.train_cfg[stage])
bbox_results.update(bbox_loss_and_target)
return bbox_results
def mask_loss(self,
x: Tuple[Tensor],
sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList,
semantic_feat: Optional[Tensor] = None,
glbctx_feat: Optional[Tensor] = None,
relayed_feat: Optional[Tensor] = None) -> dict:
"""Run forward function and calculate loss for mask head in training.
Args:
x (tuple[Tensor]): Tuple of multi-level img features.
sampling_results (list[:obj:`SamplingResult`]): Sampling results.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
semantic_feat (Tensor): Semantic feature. Defaults to None.
glbctx_feat (Tensor): Global context feature. Defaults to None.
relayed_feat (Tensor): Relayed feature. Defaults to None.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
- `loss_mask` (dict): A dictionary of mask loss components.
"""
pos_rois = bbox2roi([res.pos_priors for res in sampling_results])
mask_results = self._mask_forward(
x,
pos_rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat,
relayed_feat=relayed_feat)
mask_loss_and_target = self.mask_head.loss_and_target(
mask_preds=mask_results['mask_preds'],
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
rcnn_train_cfg=self.train_cfg[-1])
mask_results.update(mask_loss_and_target)
return mask_results
def semantic_loss(self, x: Tuple[Tensor],
batch_data_samples: SampleList) -> dict:
"""Semantic segmentation loss.
Args:
x (Tuple[Tensor]): Tuple of multi-level img features.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: Usually returns a dictionary with keys:
- `semantic_feat` (Tensor): Semantic feature.
- `loss_seg` (dict): Semantic segmentation loss.
"""
gt_semantic_segs = [
data_sample.gt_sem_seg.sem_seg
for data_sample in batch_data_samples
]
gt_semantic_segs = torch.stack(gt_semantic_segs)
semantic_pred, semantic_feat = self.semantic_head(x)
loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_segs)
semantic_results = dict(loss_seg=loss_seg, semantic_feat=semantic_feat)
return semantic_results
def global_context_loss(self, x: Tuple[Tensor],
batch_gt_instances: InstanceList) -> dict:
"""Global context loss.
Args:
x (Tuple[Tensor]): Tuple of multi-level img features.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
Returns:
dict: Usually returns a dictionary with keys:
- `glbctx_feat` (Tensor): Global context feature.
- `loss_glbctx` (dict): Global context loss.
"""
gt_labels = [
gt_instances.labels for gt_instances in batch_gt_instances
]
mc_pred, glbctx_feat = self.glbctx_head(x)
loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels)
global_context_results = dict(
loss_glbctx=loss_glbctx, glbctx_feat=glbctx_feat)
return global_context_results
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components
"""
assert len(rpn_results_list) == len(batch_data_samples)
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
losses = dict()
# semantic segmentation branch
if self.with_semantic:
semantic_results = self.semantic_loss(
x=x, batch_data_samples=batch_data_samples)
losses['loss_semantic_seg'] = semantic_results['loss_seg']
semantic_feat = semantic_results['semantic_feat']
else:
semantic_feat = None
# global context branch
if self.with_glbctx:
global_context_results = self.global_context_loss(
x=x, batch_gt_instances=batch_gt_instances)
losses['loss_glbctx'] = global_context_results['loss_glbctx']
glbctx_feat = global_context_results['glbctx_feat']
else:
glbctx_feat = None
results_list = rpn_results_list
num_imgs = len(batch_img_metas)
for stage in range(self.num_stages):
stage_loss_weight = self.stage_loss_weights[stage]
# assign gts and sample proposals
sampling_results = []
bbox_assigner = self.bbox_assigner[stage]
bbox_sampler = self.bbox_sampler[stage]
for i in range(num_imgs):
results = results_list[i]
# rename rpn_results.bboxes to rpn_results.priors
results.priors = results.pop('bboxes')
assign_result = bbox_assigner.assign(
results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = bbox_sampler.sample(
assign_result,
results,
batch_gt_instances[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_results = self.bbox_loss(
stage=stage,
x=x,
sampling_results=sampling_results,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat)
for name, value in bbox_results['loss_bbox'].items():
losses[f's{stage}.{name}'] = (
value * stage_loss_weight if 'loss' in name else value)
# refine bboxes
if stage < self.num_stages - 1:
bbox_head = self.bbox_head[stage]
with torch.no_grad():
results_list = bbox_head.refine_bboxes(
sampling_results=sampling_results,
bbox_results=bbox_results,
batch_img_metas=batch_img_metas)
if self.with_feat_relay:
relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'],
sampling_results)
relayed_feat = self.feat_relay_head(relayed_feat)
else:
relayed_feat = None
# mask head forward and loss
mask_results = self.mask_loss(
x=x,
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat,
relayed_feat=relayed_feat)
mask_stage_loss_weight = sum(self.stage_loss_weights)
losses['loss_mask'] = mask_stage_loss_weight * mask_results[
'loss_mask']['loss_mask']
return losses
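# Example of the resulting loss dict layout (illustrative; assumes two cascade
# stages with stage_loss_weights = [1, 0.5]):
#     {
#         'loss_semantic_seg': ..., 'loss_glbctx': ...,
#         's0.loss_cls': 1.0 * v,  's0.loss_bbox': 1.0 * v,
#         's1.loss_cls': 0.5 * v,  's1.loss_bbox': 0.5 * v,
#         'loss_mask': (1 + 0.5) * v,
#     }
# Entries whose name does not contain 'loss' (e.g. an accuracy metric, if the
# bbox head returns one) are stored unscaled.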
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results to
the original image. Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
if self.with_glbctx:
_, glbctx_feat = self.glbctx_head(x)
else:
glbctx_feat = None
# TODO: nms_op in mmcv need be enhanced, the bbox result may get
# difference when not rescale in bbox_head
# If it has the mask branch, the bbox branch does not need
# to be scaled to the original image scale, because the mask
# branch will scale both bbox and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.predict_bbox(
x=x,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat,
batch_img_metas=batch_img_metas,
rpn_results_list=rpn_results_list,
rcnn_test_cfg=self.test_cfg,
rescale=bbox_rescale)
if self.with_mask:
results_list = self.predict_mask(
x=x,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat,
batch_img_metas=batch_img_metas,
results_list=results_list,
rescale=rescale)
return results_list
def predict_mask(self,
x: Tuple[Tensor],
semantic_feat: Tensor,
glbctx_feat: Tensor,
batch_img_metas: List[dict],
results_list: List[InstanceData],
rescale: bool = False) -> List[InstanceData]:
"""Perform forward propagation of the mask head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
semantic_feat (Tensor): Semantic feature.
glbctx_feat (Tensor): Global context feature.
batch_img_metas (list[dict]): List of image information.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
bboxes = [res.bboxes for res in results_list]
mask_rois = bbox2roi(bboxes)
if mask_rois.shape[0] == 0:
results_list = empty_instances(
batch_img_metas=batch_img_metas,
device=mask_rois.device,
task_type='mask',
instance_results=results_list,
mask_thr_binary=self.test_cfg.mask_thr_binary)
return results_list
bboxes_results = self._bbox_forward(
stage=-1,
x=x,
rois=mask_rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat)
relayed_feat = bboxes_results['relayed_feat']
relayed_feat = self.feat_relay_head(relayed_feat)
mask_results = self._mask_forward(
x=x,
rois=mask_rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat,
relayed_feat=relayed_feat)
mask_preds = mask_results['mask_preds']
# split batch mask prediction back to each image
num_bbox_per_img = tuple(len(_bbox) for _bbox in bboxes)
mask_preds = mask_preds.split(num_bbox_per_img, 0)
results_list = self.mask_head.predict_by_feat(
mask_preds=mask_preds,
results_list=results_list,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=self.test_cfg,
rescale=rescale)
return results_list
def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
x (List[Tensor]): Multi-level features that may have different
resolutions.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
Returns:
tuple: A tuple of features from ``bbox_head`` and ``mask_head``
forward.
"""
results = ()
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
if self.with_glbctx:
_, glbctx_feat = self.glbctx_head(x)
else:
glbctx_feat = None
proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = bbox2roi(proposals)
# bbox head
if self.with_bbox:
rois, cls_scores, bbox_preds = self._refine_roi(
x=x,
rois=rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat,
batch_img_metas=batch_img_metas,
num_proposals_per_img=num_proposals_per_img)
results = results + (cls_scores, bbox_preds)
# mask head
if self.with_mask:
rois = torch.cat(rois)
bboxes_results = self._bbox_forward(
stage=-1,
x=x,
rois=rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat)
relayed_feat = bboxes_results['relayed_feat']
relayed_feat = self.feat_relay_head(relayed_feat)
mask_results = self._mask_forward(
x=x,
rois=rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat,
relayed_feat=relayed_feat)
mask_preds = mask_results['mask_preds']
mask_preds = mask_preds.split(num_proposals_per_img, 0)
results = results + (mask_preds, )
return results
| 27,836 | 40.057522 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/double_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
from torch import Tensor
from mmdet.registry import MODELS
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
"""RoI head for `Double Head RCNN <https://arxiv.org/abs/1904.06493>`_.
Args:
reg_roi_scale_factor (float): The scale factor to extend the rois
used to extract the regression features.
"""
def __init__(self, reg_roi_scale_factor: float, **kwargs):
super().__init__(**kwargs)
self.reg_roi_scale_factor = reg_roi_scale_factor
def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:
"""Box head forward function used in both training and testing.
Args:
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
"""
bbox_cls_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_reg_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs],
rois,
roi_scale_factor=self.reg_roi_scale_factor)
if self.with_shared_head:
bbox_cls_feats = self.shared_head(bbox_cls_feats)
bbox_reg_feats = self.shared_head(bbox_reg_feats)
cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
bbox_feats=bbox_cls_feats)
return bbox_results
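# Note (illustrative, not part of the original class): ``roi_scale_factor``
# enlarges each RoI about its centre before pooling, so the regression branch
# sees more context than the classification branch. For example, an RoI
# (x1, y1, x2, y2) = (10, 10, 20, 20) with factor 1.3 keeps its centre
# (15, 15), grows its half-sizes from 5 to 6.5, and becomes
# (8.5, 8.5, 21.5, 21.5).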
| 1,956 | 35.240741 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/multi_instance_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import ConfigType, InstanceList
from ..task_modules.samplers import SamplingResult
from ..utils import empty_instances, unpack_gt_instances
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class MultiInstanceRoIHead(StandardRoIHead):
"""The roi head for Multi-instance prediction."""
def __init__(self, num_instance: int = 2, *args, **kwargs) -> None:
self.num_instance = num_instance
super().__init__(*args, **kwargs)
def init_bbox_head(self, bbox_roi_extractor: ConfigType,
bbox_head: ConfigType) -> None:
"""Initialize box head and box roi extractor.
Args:
bbox_roi_extractor (dict or ConfigDict): Config of box
roi extractor.
bbox_head (dict or ConfigDict): Config of box in box head.
"""
self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor)
self.bbox_head = MODELS.build(bbox_head)
def _bbox_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:
"""Box head forward function used in both training and testing.
Args:
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `cls_score_ref` (Tensor): The cls_score after refine model.
- `bbox_pred_ref` (Tensor): The bbox_pred after refine model.
- `bbox_feats` (Tensor): Extract bbox RoI features.
"""
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_results = self.bbox_head(bbox_feats)
if self.bbox_head.with_refine:
bbox_results = dict(
cls_score=bbox_results[0],
bbox_pred=bbox_results[1],
cls_score_ref=bbox_results[2],
bbox_pred_ref=bbox_results[3],
bbox_feats=bbox_feats)
else:
bbox_results = dict(
cls_score=bbox_results[0],
bbox_pred=bbox_results[1],
bbox_feats=bbox_feats)
return bbox_results
def bbox_loss(self, x: Tuple[Tensor],
sampling_results: List[SamplingResult]) -> dict:
"""Perform forward propagation and loss calculation of the bbox head on
the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
sampling_results (list[:obj:`SamplingResult`]): Sampling results.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
"""
rois = bbox2roi([res.priors for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
# If there is a refining process, add refine loss.
if 'cls_score_ref' in bbox_results:
bbox_loss_and_target = self.bbox_head.loss_and_target(
cls_score=bbox_results['cls_score'],
bbox_pred=bbox_results['bbox_pred'],
rois=rois,
sampling_results=sampling_results,
rcnn_train_cfg=self.train_cfg)
bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])
bbox_loss_and_target_ref = self.bbox_head.loss_and_target(
cls_score=bbox_results['cls_score_ref'],
bbox_pred=bbox_results['bbox_pred_ref'],
rois=rois,
sampling_results=sampling_results,
rcnn_train_cfg=self.train_cfg)
bbox_results['loss_bbox']['loss_rcnn_emd_ref'] = \
bbox_loss_and_target_ref['loss_bbox']['loss_rcnn_emd']
else:
bbox_loss_and_target = self.bbox_head.loss_and_target(
cls_score=bbox_results['cls_score'],
bbox_pred=bbox_results['bbox_pred'],
rois=rois,
sampling_results=sampling_results,
rcnn_train_cfg=self.train_cfg)
bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])
return bbox_results
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: List[DetDataSample]) -> dict:
"""Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components
"""
assert len(rpn_results_list) == len(batch_data_samples)
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, _ = outputs
sampling_results = []
for i in range(len(batch_data_samples)):
# rename rpn_results.bboxes to rpn_results.priors
rpn_results = rpn_results_list[i]
rpn_results.priors = rpn_results.pop('bboxes')
assign_result = self.bbox_assigner.assign(
rpn_results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
rpn_results,
batch_gt_instances[i],
batch_gt_instances_ignore=batch_gt_instances_ignore[i])
sampling_results.append(sampling_result)
losses = dict()
# bbox head loss
if self.with_bbox:
bbox_results = self.bbox_loss(x, sampling_results)
losses.update(bbox_results['loss_bbox'])
return losses
def predict_bbox(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
rpn_results_list: InstanceList,
rcnn_test_cfg: ConfigType,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the bbox head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
proposals = [res.bboxes for res in rpn_results_list]
rois = bbox2roi(proposals)
if rois.shape[0] == 0:
return empty_instances(
batch_img_metas, rois.device, task_type='bbox')
bbox_results = self._bbox_forward(x, rois)
# split batch bbox prediction back to each image
if 'cls_score_ref' in bbox_results:
cls_scores = bbox_results['cls_score_ref']
bbox_preds = bbox_results['bbox_pred_ref']
else:
cls_scores = bbox_results['cls_score']
bbox_preds = bbox_results['bbox_pred']
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = rois.split(num_proposals_per_img, 0)
cls_scores = cls_scores.split(num_proposals_per_img, 0)
if bbox_preds is not None:
bbox_preds = bbox_preds.split(num_proposals_per_img, 0)
else:
bbox_preds = (None, ) * len(proposals)
result_list = self.bbox_head.predict_by_feat(
rois=rois,
cls_scores=cls_scores,
bbox_preds=bbox_preds,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=rcnn_test_cfg,
rescale=rescale)
return result_list
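# Illustrative example (hypothetical numbers) of the per-image split above:
# for a 2-image batch with 100 and 80 proposals respectively,
#     rois.split((100, 80), 0)       -> tensors of shape (100, 5) and (80, 5)
#     cls_scores.split((100, 80), 0) -> one score tensor per image
# so ``predict_by_feat`` receives per-image inputs.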
| 9,392 | 40.378855 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/sparse_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.task_modules.samplers import PseudoSampler
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import ConfigType, InstanceList, OptConfigType
from ..utils.misc import empty_instances, unpack_gt_instances
from .cascade_roi_head import CascadeRoIHead
@MODELS.register_module()
class SparseRoIHead(CascadeRoIHead):
r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
and `Instances as Queries <http://arxiv.org/abs/2105.01928>`_
Args:
num_stages (int): Number of stage whole iterative process.
Defaults to 6.
stage_loss_weights (Tuple[float]): The loss
weight of each stage. By default all stages have
the same weight 1.
bbox_roi_extractor (:obj:`ConfigDict` or dict): Config of box
roi extractor.
mask_roi_extractor (:obj:`ConfigDict` or dict): Config of mask
roi extractor.
bbox_head (:obj:`ConfigDict` or dict): Config of box head.
mask_head (:obj:`ConfigDict` or dict): Config of mask head.
train_cfg (:obj:`ConfigDict` or dict, Optional): Configuration
information in train stage. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, Optional): Configuration
information in test stage. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict]): Initialization config dict. Defaults to None.
"""
def __init__(self,
num_stages: int = 6,
stage_loss_weights: Tuple[float] = (1, 1, 1, 1, 1, 1),
proposal_feature_channel: int = 256,
bbox_roi_extractor: ConfigType = dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_roi_extractor: OptConfigType = None,
bbox_head: ConfigType = dict(
type='DIIHead',
num_classes=80,
num_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
hidden_channels=256,
dropout=0.0,
roi_feat_size=7,
ffn_act_cfg=dict(type='ReLU', inplace=True)),
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptConfigType = None) -> None:
assert bbox_roi_extractor is not None
assert bbox_head is not None
assert len(stage_loss_weights) == num_stages
self.num_stages = num_stages
self.stage_loss_weights = stage_loss_weights
self.proposal_feature_channel = proposal_feature_channel
super().__init__(
num_stages=num_stages,
stage_loss_weights=stage_loss_weights,
bbox_roi_extractor=bbox_roi_extractor,
mask_roi_extractor=mask_roi_extractor,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg)
# train_cfg would be None when run the test.py
if train_cfg is not None:
for stage in range(num_stages):
assert isinstance(self.bbox_sampler[stage], PseudoSampler), \
'Sparse R-CNN and QueryInst only support `PseudoSampler`'
def bbox_loss(self, stage: int, x: Tuple[Tensor],
results_list: InstanceList, object_feats: Tensor,
batch_img_metas: List[dict],
batch_gt_instances: InstanceList) -> dict:
"""Perform forward propagation and loss calculation of the bbox head on
the features of the upstream network.
Args:
stage (int): The current stage in iterative process.
x (tuple[Tensor]): List of multi-level img features.
results_list (List[:obj:`InstanceData`]): List of region
proposals.
object_feats (Tensor): The object feature extracted from
the previous stage.
batch_img_metas (list[dict]): Meta information of each image.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
"""
proposal_list = [res.bboxes for res in results_list]
rois = bbox2roi(proposal_list)
bbox_results = self._bbox_forward(stage, x, rois, object_feats,
batch_img_metas)
imgs_whwh = torch.cat(
[res.imgs_whwh[None, ...] for res in results_list])
cls_pred_list = bbox_results['detached_cls_scores']
proposal_list = bbox_results['detached_proposals']
sampling_results = []
bbox_head = self.bbox_head[stage]
for i in range(len(batch_img_metas)):
pred_instances = InstanceData()
# TODO: Enhance the logic
pred_instances.bboxes = proposal_list[i]  # for assigner
pred_instances.scores = cls_pred_list[i]
pred_instances.priors = proposal_list[i] # for sampler
assign_result = self.bbox_assigner[stage].assign(
pred_instances=pred_instances,
gt_instances=batch_gt_instances[i],
gt_instances_ignore=None,
img_meta=batch_img_metas[i])
sampling_result = self.bbox_sampler[stage].sample(
assign_result, pred_instances, batch_gt_instances[i])
sampling_results.append(sampling_result)
bbox_results.update(sampling_results=sampling_results)
cls_score = bbox_results['cls_score']
decoded_bboxes = bbox_results['decoded_bboxes']
cls_score = cls_score.view(-1, cls_score.size(-1))
decoded_bboxes = decoded_bboxes.view(-1, 4)
bbox_loss_and_target = bbox_head.loss_and_target(
cls_score,
decoded_bboxes,
sampling_results,
self.train_cfg[stage],
imgs_whwh=imgs_whwh,
concat=True)
bbox_results.update(bbox_loss_and_target)
# propose for the new proposal_list
proposal_list = []
for idx in range(len(batch_img_metas)):
results = InstanceData()
results.imgs_whwh = results_list[idx].imgs_whwh
results.bboxes = bbox_results['detached_proposals'][idx]
proposal_list.append(results)
bbox_results.update(results_list=proposal_list)
return bbox_results
def _bbox_forward(self, stage: int, x: Tuple[Tensor], rois: Tensor,
object_feats: Tensor,
batch_img_metas: List[dict]) -> dict:
"""Box head forward function used in both training and testing. Returns
all regression, classification results and an intermediate feature.
Args:
stage (int): The current stage in iterative process.
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Each dimension means (img_index, x1, y1, x2, y2).
object_feats (Tensor): The object feature extracted from
the previous stage.
batch_img_metas (list[dict]): Meta information of each image.
Returns:
dict[str, Tensor]: a dictionary of bbox head outputs,
Containing the following results:
- cls_score (Tensor): The score of each class, has
shape (batch_size, num_proposals, num_classes)
when use focal loss or
(batch_size, num_proposals, num_classes+1)
otherwise.
- decoded_bboxes (Tensor): The regression results
with shape (batch_size, num_proposal, 4).
The last dimension 4 represents
[tl_x, tl_y, br_x, br_y].
- object_feats (Tensor): The object feature extracted
from current stage
- detached_cls_scores (list[Tensor]): The detached
classification results, length is batch_size, and
each tensor has shape (num_proposal, num_classes).
- detached_proposals (list[tensor]): The detached
regression results, length is batch_size, and each
tensor has shape (num_proposal, 4). The last
dimension 4 represents [tl_x, tl_y, br_x, br_y].
"""
num_imgs = len(batch_img_metas)
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
cls_score, bbox_pred, object_feats, attn_feats = bbox_head(
bbox_feats, object_feats)
fake_bbox_results = dict(
rois=rois,
bbox_targets=(rois.new_zeros(len(rois), dtype=torch.long), None),
bbox_pred=bbox_pred.view(-1, bbox_pred.size(-1)),
cls_score=cls_score.view(-1, cls_score.size(-1)))
fake_sampling_results = [
InstanceData(pos_is_gt=rois.new_zeros(object_feats.size(1)))
for _ in range(len(batch_img_metas))
]
results_list = bbox_head.refine_bboxes(
sampling_results=fake_sampling_results,
bbox_results=fake_bbox_results,
batch_img_metas=batch_img_metas)
proposal_list = [res.bboxes for res in results_list]
bbox_results = dict(
cls_score=cls_score,
decoded_bboxes=torch.cat(proposal_list),
object_feats=object_feats,
attn_feats=attn_feats,
# detach then use it in label assign
detached_cls_scores=[
cls_score[i].detach() for i in range(num_imgs)
],
detached_proposals=[item.detach() for item in proposal_list])
return bbox_results
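# Note (illustrative): ``fake_bbox_results`` and ``fake_sampling_results``
# above only exist so that ``refine_bboxes`` can be reused as a box decoder:
# with all-zero fake labels and ``pos_is_gt`` zeros it simply applies the
# predicted deltas to the input rois and returns the decoded boxes per image.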
def _mask_forward(self, stage: int, x: Tuple[Tensor], rois: Tensor,
attn_feats) -> dict:
"""Mask head forward function used in both training and testing.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): Tuple of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
attn_feats (Tensor): Intermediate feature obtained from the last
diihead, has shape
(batch_size*num_proposals, feature_dimensions)
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
"""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
rois)
# do not support caffe_c4 model anymore
mask_preds = mask_head(mask_feats, attn_feats)
mask_results = dict(mask_preds=mask_preds)
return mask_results
def mask_loss(self, stage: int, x: Tuple[Tensor], bbox_results: dict,
batch_gt_instances: InstanceList,
rcnn_train_cfg: ConfigDict) -> dict:
"""Run forward function and calculate loss for mask head in training.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): Tuple of multi-level img features.
bbox_results (dict): Results obtained from `bbox_loss`.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
- `loss_mask` (dict): A dictionary of mask loss components.
"""
attn_feats = bbox_results['attn_feats']
sampling_results = bbox_results['sampling_results']
pos_rois = bbox2roi([res.pos_priors for res in sampling_results])
attn_feats = torch.cat([
feats[res.pos_inds]
for (feats, res) in zip(attn_feats, sampling_results)
])
mask_results = self._mask_forward(stage, x, pos_rois, attn_feats)
mask_loss_and_target = self.mask_head[stage].loss_and_target(
mask_preds=mask_results['mask_preds'],
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
rcnn_train_cfg=rcnn_train_cfg)
mask_results.update(mask_loss_and_target)
return mask_results
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
rpn_results_list (List[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: a dictionary of loss components of all stage.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
object_feats = torch.cat(
[res.pop('features')[None, ...] for res in rpn_results_list])
results_list = rpn_results_list
losses = {}
for stage in range(self.num_stages):
stage_loss_weight = self.stage_loss_weights[stage]
# bbox head forward and loss
bbox_results = self.bbox_loss(
stage=stage,
x=x,
object_feats=object_feats,
results_list=results_list,
batch_img_metas=batch_img_metas,
batch_gt_instances=batch_gt_instances)
for name, value in bbox_results['loss_bbox'].items():
losses[f's{stage}.{name}'] = (
value * stage_loss_weight if 'loss' in name else value)
if self.with_mask:
mask_results = self.mask_loss(
stage=stage,
x=x,
bbox_results=bbox_results,
batch_gt_instances=batch_gt_instances,
rcnn_train_cfg=self.train_cfg[stage])
for name, value in mask_results['loss_mask'].items():
losses[f's{stage}.{name}'] = (
value * stage_loss_weight if 'loss' in name else value)
object_feats = bbox_results['object_feats']
results_list = bbox_results['results_list']
return losses
def predict_bbox(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
rpn_results_list: InstanceList,
rcnn_test_cfg: ConfigType,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the bbox head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
proposal_list = [res.bboxes for res in rpn_results_list]
object_feats = torch.cat(
[res.pop('features')[None, ...] for res in rpn_results_list])
if all([proposal.shape[0] == 0 for proposal in proposal_list]):
# There is no proposal in the whole batch
return empty_instances(
batch_img_metas, x[0].device, task_type='bbox')
for stage in range(self.num_stages):
rois = bbox2roi(proposal_list)
bbox_results = self._bbox_forward(stage, x, rois, object_feats,
batch_img_metas)
object_feats = bbox_results['object_feats']
cls_score = bbox_results['cls_score']
proposal_list = bbox_results['detached_proposals']
num_classes = self.bbox_head[-1].num_classes
if self.bbox_head[-1].loss_cls.use_sigmoid:
cls_score = cls_score.sigmoid()
else:
cls_score = cls_score.softmax(-1)[..., :-1]
topk_inds_list = []
results_list = []
for img_id in range(len(batch_img_metas)):
cls_score_per_img = cls_score[img_id]
scores_per_img, topk_inds = cls_score_per_img.flatten(0, 1).topk(
self.test_cfg.max_per_img, sorted=False)
labels_per_img = topk_inds % num_classes
bboxes_per_img = proposal_list[img_id][topk_inds // num_classes]
topk_inds_list.append(topk_inds)
if rescale and bboxes_per_img.size(0) > 0:
assert batch_img_metas[img_id].get('scale_factor') is not None
scale_factor = bboxes_per_img.new_tensor(
batch_img_metas[img_id]['scale_factor']).repeat((1, 2))
bboxes_per_img = (
bboxes_per_img.view(bboxes_per_img.size(0), -1, 4) /
scale_factor).view(bboxes_per_img.size()[0], -1)
results = InstanceData()
results.bboxes = bboxes_per_img
results.scores = scores_per_img
results.labels = labels_per_img
results_list.append(results)
if self.with_mask:
for img_id in range(len(batch_img_metas)):
# add positive information in InstanceData to predict
# mask results in `mask_head`.
proposals = bbox_results['detached_proposals'][img_id]
topk_inds = topk_inds_list[img_id]
attn_feats = bbox_results['attn_feats'][img_id]
results_list[img_id].proposals = proposals
results_list[img_id].topk_inds = topk_inds
results_list[img_id].attn_feats = attn_feats
return results_list
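# Illustrative decoding of the flattened top-k indices used above
# (hypothetical numbers): with num_classes = 80 the per-image score matrix of
# shape (num_proposals, 80) is flattened before ``topk``, so a selected index
# maps back via
#     label        = topk_ind % 80    # class id
#     proposal_ind = topk_ind // 80   # which proposal produced the score
# e.g. topk_ind = 245 -> class 5 of proposal 3.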
def predict_mask(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
results_list: InstanceList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the mask head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
results_list (list[:obj:`InstanceData`]): Detection results of
each image. Each item usually contains following keys:
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- proposals (Tensor): Bboxes predicted from bbox_head,
has a shape (num_instances, 4).
- topk_inds (Tensor): Topk indices of each image, has
shape (num_instances, )
- attn_feats (Tensor): Intermediate feature obtained from the last
diihead, has shape (num_instances, feature_dimensions)
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
proposal_list = [res.pop('proposals') for res in results_list]
topk_inds_list = [res.pop('topk_inds') for res in results_list]
attn_feats = torch.cat(
[res.pop('attn_feats')[None, ...] for res in results_list])
rois = bbox2roi(proposal_list)
if rois.shape[0] == 0:
results_list = empty_instances(
batch_img_metas,
rois.device,
task_type='mask',
instance_results=results_list,
mask_thr_binary=self.test_cfg.mask_thr_binary)
return results_list
last_stage = self.num_stages - 1
mask_results = self._mask_forward(last_stage, x, rois, attn_feats)
num_imgs = len(batch_img_metas)
mask_results['mask_preds'] = mask_results['mask_preds'].reshape(
num_imgs, -1, *mask_results['mask_preds'].size()[1:])
num_classes = self.bbox_head[-1].num_classes
mask_preds = []
for img_id in range(num_imgs):
topk_inds = topk_inds_list[img_id]
masks_per_img = mask_results['mask_preds'][img_id].flatten(
0, 1)[topk_inds]
masks_per_img = masks_per_img[:, None,
...].repeat(1, num_classes, 1, 1)
mask_preds.append(masks_per_img)
results_list = self.mask_head[-1].predict_by_feat(
mask_preds,
results_list,
batch_img_metas,
rcnn_test_cfg=self.test_cfg,
rescale=rescale)
return results_list
# TODO: Need to refactor later
def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
x (List[Tensor]): Multi-level features that may have different
resolutions.
rpn_results_list (List[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
tuple: A tuple of features from ``bbox_head`` and ``mask_head``
forward.
"""
outputs = unpack_gt_instances(batch_data_samples)
(batch_gt_instances, batch_gt_instances_ignore,
batch_img_metas) = outputs
all_stage_bbox_results = []
object_feats = torch.cat(
[res.pop('features')[None, ...] for res in rpn_results_list])
results_list = rpn_results_list
if self.with_bbox:
for stage in range(self.num_stages):
bbox_results = self.bbox_loss(
stage=stage,
x=x,
results_list=results_list,
object_feats=object_feats,
batch_img_metas=batch_img_metas,
batch_gt_instances=batch_gt_instances)
bbox_results.pop('loss_bbox')
# torch.jit does not support obj:SamplingResult
bbox_results.pop('results_list')
bbox_res = bbox_results.copy()
bbox_res.pop('sampling_results')
all_stage_bbox_results.append((bbox_res, ))
if self.with_mask:
attn_feats = bbox_results['attn_feats']
sampling_results = bbox_results['sampling_results']
pos_rois = bbox2roi(
[res.pos_priors for res in sampling_results])
attn_feats = torch.cat([
feats[res.pos_inds]
for (feats, res) in zip(attn_feats, sampling_results)
])
mask_results = self._mask_forward(stage, x, pos_rois,
attn_feats)
all_stage_bbox_results[-1] += (mask_results, )
return tuple(all_stage_bbox_results)
| 26,551 | 43.106312 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/cascade_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Sequence, Tuple, Union
import torch
import torch.nn as nn
from mmengine.model import ModuleList
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.models.test_time_augs import merge_aug_masks
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi, get_box_tensor
from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptMultiConfig)
from ..utils.misc import empty_instances, unpack_gt_instances
from .base_roi_head import BaseRoIHead
@MODELS.register_module()
class CascadeRoIHead(BaseRoIHead):
"""Cascade roi head including one bbox head and one mask head.
https://arxiv.org/abs/1712.00726
"""
def __init__(self,
num_stages: int,
stage_loss_weights: Union[List[float], Tuple[float]],
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
assert bbox_roi_extractor is not None
assert bbox_head is not None
assert shared_head is None, \
'Shared head is not supported in Cascade RCNN anymore'
self.num_stages = num_stages
self.stage_loss_weights = stage_loss_weights
super().__init__(
bbox_roi_extractor=bbox_roi_extractor,
bbox_head=bbox_head,
mask_roi_extractor=mask_roi_extractor,
mask_head=mask_head,
shared_head=shared_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg)
def init_bbox_head(self, bbox_roi_extractor: MultiConfig,
bbox_head: MultiConfig) -> None:
"""Initialize box head and box roi extractor.
Args:
bbox_roi_extractor (:obj:`ConfigDict`, dict or list):
Config of box roi extractor.
bbox_head (:obj:`ConfigDict`, dict or list): Config
of box in box head.
"""
self.bbox_roi_extractor = ModuleList()
self.bbox_head = ModuleList()
if not isinstance(bbox_roi_extractor, list):
bbox_roi_extractor = [
bbox_roi_extractor for _ in range(self.num_stages)
]
if not isinstance(bbox_head, list):
bbox_head = [bbox_head for _ in range(self.num_stages)]
assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages
for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):
self.bbox_roi_extractor.append(MODELS.build(roi_extractor))
self.bbox_head.append(MODELS.build(head))
def init_mask_head(self, mask_roi_extractor: MultiConfig,
mask_head: MultiConfig) -> None:
"""Initialize mask head and mask roi extractor.
Args:
mask_head (dict): Config of mask in mask head.
mask_roi_extractor (:obj:`ConfigDict`, dict or list):
Config of mask roi extractor.
"""
self.mask_head = nn.ModuleList()
if not isinstance(mask_head, list):
mask_head = [mask_head for _ in range(self.num_stages)]
assert len(mask_head) == self.num_stages
for head in mask_head:
self.mask_head.append(MODELS.build(head))
if mask_roi_extractor is not None:
self.share_roi_extractor = False
self.mask_roi_extractor = ModuleList()
if not isinstance(mask_roi_extractor, list):
mask_roi_extractor = [
mask_roi_extractor for _ in range(self.num_stages)
]
assert len(mask_roi_extractor) == self.num_stages
for roi_extractor in mask_roi_extractor:
self.mask_roi_extractor.append(MODELS.build(roi_extractor))
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
def init_assigner_sampler(self) -> None:
"""Initialize assigner and sampler for each stage."""
self.bbox_assigner = []
self.bbox_sampler = []
if self.train_cfg is not None:
for idx, rcnn_train_cfg in enumerate(self.train_cfg):
self.bbox_assigner.append(
TASK_UTILS.build(rcnn_train_cfg.assigner))
self.current_stage = idx
self.bbox_sampler.append(
TASK_UTILS.build(
rcnn_train_cfg.sampler,
default_args=dict(context=self)))
def _bbox_forward(self, stage: int, x: Tuple[Tensor],
rois: Tensor) -> dict:
"""Box head forward function used in both training and testing.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
"""
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
# do not support caffe_c4 model anymore
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
return bbox_results
def bbox_loss(self, stage: int, x: Tuple[Tensor],
sampling_results: List[SamplingResult]) -> dict:
"""Run forward function and calculate loss for box head in training.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): List of multi-level img features.
sampling_results (list[:obj:`SamplingResult`]): Sampling results.
Returns:
dict: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
- `rois` (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
- `bbox_targets` (tuple): Ground truth for proposals in a
single image. Containing the following list of Tensors:
(labels, label_weights, bbox_targets, bbox_weights)
"""
bbox_head = self.bbox_head[stage]
rois = bbox2roi([res.priors for res in sampling_results])
bbox_results = self._bbox_forward(stage, x, rois)
bbox_results.update(rois=rois)
bbox_loss_and_target = bbox_head.loss_and_target(
cls_score=bbox_results['cls_score'],
bbox_pred=bbox_results['bbox_pred'],
rois=rois,
sampling_results=sampling_results,
rcnn_train_cfg=self.train_cfg[stage])
bbox_results.update(bbox_loss_and_target)
return bbox_results
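# Illustrative note: ``bbox2roi`` concatenates per-image priors into one
# (n, 5) tensor whose first column is the image index, e.g. (hypothetical)
#     img0 priors [[10, 10, 50, 60]] and img1 priors [[5, 5, 30, 40]] give
#     rois = [[0, 10, 10, 50, 60],
#             [1,  5,  5, 30, 40]]
# so one RoI-extractor call can pool features for the whole batch.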
def _mask_forward(self, stage: int, x: Tuple[Tensor],
rois: Tensor) -> dict:
"""Mask head forward function used in both training and testing.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): Tuple of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
"""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
rois)
# do not support caffe_c4 model anymore
mask_preds = mask_head(mask_feats)
mask_results = dict(mask_preds=mask_preds)
return mask_results
def mask_loss(self, stage: int, x: Tuple[Tensor],
sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList) -> dict:
"""Run forward function and calculate loss for mask head in training.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): Tuple of multi-level img features.
sampling_results (list["obj:`SamplingResult`]): Sampling results.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
- `loss_mask` (dict): A dictionary of mask loss components.
"""
pos_rois = bbox2roi([res.pos_priors for res in sampling_results])
mask_results = self._mask_forward(stage, x, pos_rois)
mask_head = self.mask_head[stage]
mask_loss_and_target = mask_head.loss_and_target(
mask_preds=mask_results['mask_preds'],
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
rcnn_train_cfg=self.train_cfg[stage])
mask_results.update(mask_loss_and_target)
return mask_results
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components
"""
# TODO: May add a new function in baseroihead
assert len(rpn_results_list) == len(batch_data_samples)
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
num_imgs = len(batch_data_samples)
losses = dict()
results_list = rpn_results_list
for stage in range(self.num_stages):
self.current_stage = stage
stage_loss_weight = self.stage_loss_weights[stage]
# assign gts and sample proposals
sampling_results = []
if self.with_bbox or self.with_mask:
bbox_assigner = self.bbox_assigner[stage]
bbox_sampler = self.bbox_sampler[stage]
for i in range(num_imgs):
results = results_list[i]
# rename rpn_results.bboxes to rpn_results.priors
results.priors = results.pop('bboxes')
assign_result = bbox_assigner.assign(
results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = bbox_sampler.sample(
assign_result,
results,
batch_gt_instances[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_results = self.bbox_loss(stage, x, sampling_results)
for name, value in bbox_results['loss_bbox'].items():
losses[f's{stage}.{name}'] = (
value * stage_loss_weight if 'loss' in name else value)
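            # e.g. with stage_loss_weights=(1, 0.5, 0.25), 's1.loss_cls' holds
            # the stage-1 classification loss scaled by 0.5, while non-loss
            # entries (such as accuracy metrics) are stored unscaled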
# mask head forward and loss
if self.with_mask:
mask_results = self.mask_loss(stage, x, sampling_results,
batch_gt_instances)
for name, value in mask_results['loss_mask'].items():
losses[f's{stage}.{name}'] = (
value * stage_loss_weight if 'loss' in name else value)
# refine bboxes
if stage < self.num_stages - 1:
bbox_head = self.bbox_head[stage]
with torch.no_grad():
results_list = bbox_head.refine_bboxes(
sampling_results, bbox_results, batch_img_metas)
# Empty proposal
if results_list is None:
break
return losses
def predict_bbox(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
rpn_results_list: InstanceList,
rcnn_test_cfg: ConfigType,
rescale: bool = False,
**kwargs) -> InstanceList:
"""Perform forward propagation of the bbox head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
                Each item usually contains the following keys.
                - scores (Tensor): Classification scores, has a shape
                  (num_instances, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                  the last dimension 4 arranged as (x1, y1, x2, y2).
"""
proposals = [res.bboxes for res in rpn_results_list]
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = bbox2roi(proposals)
if rois.shape[0] == 0:
return empty_instances(
batch_img_metas,
rois.device,
task_type='bbox',
box_type=self.bbox_head[-1].predict_box_type,
num_classes=self.bbox_head[-1].num_classes,
score_per_cls=rcnn_test_cfg is None)
rois, cls_scores, bbox_preds = self._refine_roi(
x=x,
rois=rois,
batch_img_metas=batch_img_metas,
num_proposals_per_img=num_proposals_per_img,
**kwargs)
results_list = self.bbox_head[-1].predict_by_feat(
rois=rois,
cls_scores=cls_scores,
bbox_preds=bbox_preds,
batch_img_metas=batch_img_metas,
rescale=rescale,
rcnn_test_cfg=rcnn_test_cfg)
return results_list
def predict_mask(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
results_list: List[InstanceData],
rescale: bool = False) -> List[InstanceData]:
"""Perform forward propagation of the mask head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
                (num_instances, )
            - labels (Tensor): Labels of bboxes, has a shape
                (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
bboxes = [res.bboxes for res in results_list]
mask_rois = bbox2roi(bboxes)
if mask_rois.shape[0] == 0:
results_list = empty_instances(
batch_img_metas,
mask_rois.device,
task_type='mask',
instance_results=results_list,
mask_thr_binary=self.test_cfg.mask_thr_binary)
return results_list
num_mask_rois_per_img = [len(res) for res in results_list]
aug_masks = []
for stage in range(self.num_stages):
mask_results = self._mask_forward(stage, x, mask_rois)
mask_preds = mask_results['mask_preds']
# split batch mask prediction back to each image
mask_preds = mask_preds.split(num_mask_rois_per_img, 0)
aug_masks.append([m.sigmoid().detach() for m in mask_preds])
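        # each entry of aug_masks holds one stage's per-image mask
        # probabilities; merge_aug_masks below ensembles them per image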
merged_masks = []
for i in range(len(batch_img_metas)):
aug_mask = [mask[i] for mask in aug_masks]
merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])
merged_masks.append(merged_mask)
results_list = self.mask_head[-1].predict_by_feat(
mask_preds=merged_masks,
results_list=results_list,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=self.test_cfg,
rescale=rescale,
activate_map=True)
return results_list
def _refine_roi(self, x: Tuple[Tensor], rois: Tensor,
batch_img_metas: List[dict],
num_proposals_per_img: Sequence[int], **kwargs) -> tuple:
"""Multi-stage refinement of RoI.
Args:
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): shape (n, 5), [batch_ind, x1, y1, x2, y2]
batch_img_metas (list[dict]): List of image information.
num_proposals_per_img (sequence[int]): number of proposals
in each image.
Returns:
tuple:
- rois (Tensor): Refined RoI.
- cls_scores (list[Tensor]): Average predicted
cls score per image.
- bbox_preds (list[Tensor]): Bbox branch predictions
for the last stage of per image.
"""
# "ms" in variable names means multi-stage
ms_scores = []
for stage in range(self.num_stages):
bbox_results = self._bbox_forward(
stage=stage, x=x, rois=rois, **kwargs)
# split batch bbox prediction back to each image
cls_scores = bbox_results['cls_score']
bbox_preds = bbox_results['bbox_pred']
rois = rois.split(num_proposals_per_img, 0)
cls_scores = cls_scores.split(num_proposals_per_img, 0)
ms_scores.append(cls_scores)
            # for detectors whose with_reg is False, bbox_preds will be None
if bbox_preds is not None:
# TODO move this to a sabl_roi_head
# the bbox prediction of some detectors like SABL is not Tensor
if isinstance(bbox_preds, torch.Tensor):
bbox_preds = bbox_preds.split(num_proposals_per_img, 0)
else:
bbox_preds = self.bbox_head[stage].bbox_pred_split(
bbox_preds, num_proposals_per_img)
else:
bbox_preds = (None, ) * len(batch_img_metas)
if stage < self.num_stages - 1:
bbox_head = self.bbox_head[stage]
if bbox_head.custom_activation:
cls_scores = [
bbox_head.loss_cls.get_activation(s)
for s in cls_scores
]
refine_rois_list = []
for i in range(len(batch_img_metas)):
if rois[i].shape[0] > 0:
bbox_label = cls_scores[i][:, :-1].argmax(dim=1)
# Refactor `bbox_head.regress_by_class` to only accept
# box tensor without img_idx concatenated.
refined_bboxes = bbox_head.regress_by_class(
rois[i][:, 1:], bbox_label, bbox_preds[i],
batch_img_metas[i])
refined_bboxes = get_box_tensor(refined_bboxes)
refined_rois = torch.cat(
[rois[i][:, [0]], refined_bboxes], dim=1)
refine_rois_list.append(refined_rois)
rois = torch.cat(refine_rois_list)
# average scores of each image by stages
cls_scores = [
sum([score[i] for score in ms_scores]) / float(len(ms_scores))
for i in range(len(batch_img_metas))
]
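        # e.g. for a 3-stage cascade, each image's final score is
        # (score_stage0 + score_stage1 + score_stage2) / 3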
return rois, cls_scores, bbox_preds
def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
x (List[Tensor]): Multi-level features that may have different
resolutions.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
        Returns:
tuple: A tuple of features from ``bbox_head`` and ``mask_head``
forward.
"""
results = ()
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = bbox2roi(proposals)
# bbox head
if self.with_bbox:
rois, cls_scores, bbox_preds = self._refine_roi(
x, rois, batch_img_metas, num_proposals_per_img)
results = results + (cls_scores, bbox_preds)
# mask head
if self.with_mask:
aug_masks = []
rois = torch.cat(rois)
for stage in range(self.num_stages):
mask_results = self._mask_forward(stage, x, rois)
mask_preds = mask_results['mask_preds']
mask_preds = mask_preds.split(num_proposals_per_img, 0)
aug_masks.append([m.sigmoid().detach() for m in mask_preds])
merged_masks = []
for i in range(len(batch_img_metas)):
aug_mask = [mask[i] for mask in aug_masks]
merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])
merged_masks.append(merged_mask)
results = results + (merged_masks, )
return results
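# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal, self-contained toy of the multi-stage refinement idea in
# `_refine_roi` above: every stage predicts class scores and box deltas, the
# boxes are refined stage by stage, and the classification scores of all
# stages are averaged at the end. The random tensors below are placeholders
# standing in for the real bbox heads.
if __name__ == '__main__':
    import torch

    num_stages, num_rois, num_classes = 3, 5, 8
    boxes = torch.rand(num_rois, 4) * 100  # toy RoIs
    stage_scores = []
    for _ in range(num_stages):
        cls_score = torch.randn(num_rois, num_classes + 1)  # incl. background
        delta = torch.randn(num_rois, 4) * 0.05  # toy refinement
        boxes = boxes + delta
        stage_scores.append(cls_score.softmax(dim=-1))
    # average the per-stage scores, as done at the end of `_refine_roi`
    avg_scores = sum(stage_scores) / float(len(stage_scores))
    print(avg_scores.shape)  # torch.Size([5, 9])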
| 24,347 | 41.790861 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/trident_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch
from mmcv.ops import batched_nms
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import InstanceList
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class TridentRoIHead(StandardRoIHead):
"""Trident roi head.
Args:
num_branch (int): Number of branches in TridentNet.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
"""
def __init__(self, num_branch: int, test_branch_idx: int,
**kwargs) -> None:
self.num_branch = num_branch
self.test_branch_idx = test_branch_idx
super().__init__(**kwargs)
def merge_trident_bboxes(self,
trident_results: InstanceList) -> InstanceData:
"""Merge bbox predictions of each branch.
Args:
trident_results (List[:obj:`InstanceData`]): A list of InstanceData
predicted from every branch.
Returns:
:obj:`InstanceData`: merged InstanceData.
"""
bboxes = torch.cat([res.bboxes for res in trident_results])
scores = torch.cat([res.scores for res in trident_results])
labels = torch.cat([res.labels for res in trident_results])
nms_cfg = self.test_cfg['nms']
results = InstanceData()
if bboxes.numel() == 0:
results.bboxes = bboxes
results.scores = scores
results.labels = labels
else:
det_bboxes, keep = batched_nms(bboxes, scores, labels, nms_cfg)
results.bboxes = det_bboxes[:, :-1]
results.scores = det_bboxes[:, -1]
results.labels = labels[keep]
if self.test_cfg['max_per_img'] > 0:
results = results[:self.test_cfg['max_per_img']]
return results
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
- Compute prediction bbox and label per branch.
- Merge predictions of each branch according to scores of
bboxes, i.e., bboxes with higher score are kept to give
top-k prediction.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results to
                the original image. Defaults to False.
Returns:
            list[:obj:`InstanceData`]: Detection results of each image.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
                (num_instances, )
            - labels (Tensor): Labels of bboxes, has a shape
                (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
"""
results_list = super().predict(
x=x,
rpn_results_list=rpn_results_list,
batch_data_samples=batch_data_samples,
rescale=rescale)
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
merged_results_list = []
for i in range(len(batch_data_samples) // num_branch):
merged_results_list.append(
self.merge_trident_bboxes(results_list[i * num_branch:(i + 1) *
num_branch]))
return merged_results_list
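# --- Illustrative sketch (not part of the original file) ---------------------
# A rough, self-contained example of the branch-merging idea in
# `merge_trident_bboxes`: detections from several branches are concatenated
# and deduplicated with class-aware NMS. Assumes mmcv is installed; the toy
# boxes, scores and labels below are placeholders.
if __name__ == '__main__':
    import torch
    from mmcv.ops import batched_nms

    def toy_branch(num=4, num_classes=5):
        xy = torch.rand(num, 2) * 100
        wh = torch.rand(num, 2) * 20 + 5
        boxes = torch.cat([xy, xy + wh], dim=-1)  # valid (x1, y1, x2, y2)
        return boxes, torch.rand(num), torch.randint(0, num_classes, (num, ))

    branches = [toy_branch() for _ in range(3)]
    bboxes = torch.cat([b[0] for b in branches])
    scores = torch.cat([b[1] for b in branches])
    labels = torch.cat([b[2] for b in branches])
    # same call pattern as in `merge_trident_bboxes` above
    dets, keep = batched_nms(bboxes, scores, labels,
                             dict(type='nms', iou_threshold=0.5))
    print(dets.shape, keep.shape)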
| 4,298 | 37.044248 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/dynamic_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import numpy as np
import torch
from torch import Tensor
from mmdet.models.losses import SmoothL1Loss
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import InstanceList
from ..utils.misc import unpack_gt_instances
from .standard_roi_head import StandardRoIHead
EPS = 1e-15
@MODELS.register_module()
class DynamicRoIHead(StandardRoIHead):
"""RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_."""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)
# the IoU history of the past `update_iter_interval` iterations
self.iou_history = []
# the beta history of the past `update_iter_interval` iterations
self.beta_history = []
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> dict:
"""Forward function for training.
Args:
x (tuple[Tensor]): List of multi-level img features.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
assert len(rpn_results_list) == len(batch_data_samples)
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, _ = outputs
# assign gts and sample proposals
num_imgs = len(batch_data_samples)
sampling_results = []
cur_iou = []
for i in range(num_imgs):
# rename rpn_results.bboxes to rpn_results.priors
rpn_results = rpn_results_list[i]
rpn_results.priors = rpn_results.pop('bboxes')
assign_result = self.bbox_assigner.assign(
rpn_results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
rpn_results,
batch_gt_instances[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
# record the `iou_topk`-th largest IoU in an image
iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,
len(assign_result.max_overlaps))
ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)
cur_iou.append(ious[-1].item())
sampling_results.append(sampling_result)
# average the current IoUs over images
cur_iou = np.mean(cur_iou)
self.iou_history.append(cur_iou)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self.bbox_loss(x, sampling_results)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self.mask_loss(x, sampling_results,
bbox_results['bbox_feats'],
batch_gt_instances)
losses.update(mask_results['loss_mask'])
# update IoU threshold and SmoothL1 beta
update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval
if len(self.iou_history) % update_iter_interval == 0:
new_iou_thr, new_beta = self.update_hyperparameters()
return losses
def bbox_loss(self, x: Tuple[Tensor],
sampling_results: List[SamplingResult]) -> dict:
"""Perform forward propagation and loss calculation of the bbox head on
the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
sampling_results (list["obj:`SamplingResult`]): Sampling results.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
"""
rois = bbox2roi([res.priors for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
bbox_loss_and_target = self.bbox_head.loss_and_target(
cls_score=bbox_results['cls_score'],
bbox_pred=bbox_results['bbox_pred'],
rois=rois,
sampling_results=sampling_results,
rcnn_train_cfg=self.train_cfg)
bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox'])
# record the `beta_topk`-th smallest target
# `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
# and bbox_weights, respectively
bbox_targets = bbox_loss_and_target['bbox_targets']
pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
num_pos = len(pos_inds)
num_imgs = len(sampling_results)
if num_pos > 0:
cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
num_pos)
cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
self.beta_history.append(cur_target)
return bbox_results
def update_hyperparameters(self):
"""Update hyperparameters like IoU thresholds for assigner and beta for
SmoothL1 loss based on the training statistics.
Returns:
tuple[float]: the updated ``iou_thr`` and ``beta``.
"""
new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
np.mean(self.iou_history))
self.iou_history = []
self.bbox_assigner.pos_iou_thr = new_iou_thr
self.bbox_assigner.neg_iou_thr = new_iou_thr
self.bbox_assigner.min_pos_iou = new_iou_thr
if (not self.beta_history) or (np.median(self.beta_history) < EPS):
# avoid 0 or too small value for new_beta
new_beta = self.bbox_head.loss_bbox.beta
else:
new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
np.median(self.beta_history))
self.beta_history = []
self.bbox_head.loss_bbox.beta = new_beta
return new_iou_thr, new_beta
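# --- Illustrative sketch (not part of the original file) ---------------------
# A tiny numeric example of `update_hyperparameters` above: the new IoU
# threshold is the mean of the recorded IoUs (floored by `initial_iou`) and
# the new SmoothL1 beta is the median of the recorded targets (capped by
# `initial_beta`). The numbers are made up for illustration; EPS is the
# constant defined at the top of this module.
if __name__ == '__main__':
    import numpy as np

    initial_iou, initial_beta = 0.4, 1.0
    iou_history = [0.45, 0.52, 0.48]
    beta_history = [0.11, 0.07, 0.09]

    new_iou_thr = max(initial_iou, np.mean(iou_history))  # -> ~0.483
    if (not beta_history) or (np.median(beta_history) < EPS):
        new_beta = initial_beta  # the real code falls back to the current beta
    else:
        new_beta = min(initial_beta, np.median(beta_history))  # -> 0.09
    print(new_iou_thr, new_beta)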
| 6,794 | 40.432927 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/point_rend_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa
from typing import List, Tuple
import torch
import torch.nn.functional as F
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import ConfigType, InstanceList
from ..task_modules.samplers import SamplingResult
from ..utils import empty_instances
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class PointRendRoIHead(StandardRoIHead):
"""`PointRend <https://arxiv.org/abs/1912.08193>`_."""
def __init__(self, point_head: ConfigType, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
assert self.with_bbox and self.with_mask
self.init_point_head(point_head)
def init_point_head(self, point_head: ConfigType) -> None:
"""Initialize ``point_head``"""
self.point_head = MODELS.build(point_head)
def mask_loss(self, x: Tuple[Tensor],
sampling_results: List[SamplingResult], bbox_feats: Tensor,
batch_gt_instances: InstanceList) -> dict:
"""Run forward function and calculate loss for mask head and point head
in training."""
mask_results = super().mask_loss(
x=x,
sampling_results=sampling_results,
bbox_feats=bbox_feats,
batch_gt_instances=batch_gt_instances)
mask_point_results = self._mask_point_loss(
x=x,
sampling_results=sampling_results,
mask_preds=mask_results['mask_preds'],
batch_gt_instances=batch_gt_instances)
mask_results['loss_mask'].update(
loss_point=mask_point_results['loss_point'])
return mask_results
def _mask_point_loss(self, x: Tuple[Tensor],
sampling_results: List[SamplingResult],
mask_preds: Tensor,
batch_gt_instances: InstanceList) -> dict:
"""Run forward function and calculate loss for point head in
training."""
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
rel_roi_points = self.point_head.get_roi_rel_points_train(
mask_preds, pos_labels, cfg=self.train_cfg)
rois = bbox2roi([res.pos_bboxes for res in sampling_results])
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, rois, rel_roi_points)
coarse_point_feats = point_sample(mask_preds, rel_roi_points)
mask_point_pred = self.point_head(fine_grained_point_feats,
coarse_point_feats)
loss_and_target = self.point_head.loss_and_target(
point_pred=mask_point_pred,
rel_roi_points=rel_roi_points,
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
cfg=self.train_cfg)
return loss_and_target
def _mask_point_forward_test(self, x: Tuple[Tensor], rois: Tensor,
label_preds: Tensor,
mask_preds: Tensor) -> Tensor:
"""Mask refining process with point head in testing.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
rois (Tensor): shape (num_rois, 5).
label_preds (Tensor): The predication class for each rois.
mask_preds (Tensor): The predication coarse masks of
shape (num_rois, num_classes, small_size, small_size).
Returns:
Tensor: The refined masks of shape (num_rois, num_classes,
large_size, large_size).
"""
refined_mask_pred = mask_preds.clone()
for subdivision_step in range(self.test_cfg.subdivision_steps):
refined_mask_pred = F.interpolate(
refined_mask_pred,
scale_factor=self.test_cfg.scale_factor,
mode='bilinear',
align_corners=False)
            # If `subdivision_num_points` is larger than or equal to the
            # resolution of the next step, this step can be skipped
num_rois, channels, mask_height, mask_width = \
refined_mask_pred.shape
if (self.test_cfg.subdivision_num_points >=
self.test_cfg.scale_factor**2 * mask_height * mask_width
and
subdivision_step < self.test_cfg.subdivision_steps - 1):
continue
point_indices, rel_roi_points = \
self.point_head.get_roi_rel_points_test(
refined_mask_pred, label_preds, cfg=self.test_cfg)
fine_grained_point_feats = self._get_fine_grained_point_feats(
x=x, rois=rois, rel_roi_points=rel_roi_points)
coarse_point_feats = point_sample(mask_preds, rel_roi_points)
mask_point_pred = self.point_head(fine_grained_point_feats,
coarse_point_feats)
point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
refined_mask_pred = refined_mask_pred.reshape(
num_rois, channels, mask_height * mask_width)
refined_mask_pred = refined_mask_pred.scatter_(
2, point_indices, mask_point_pred)
refined_mask_pred = refined_mask_pred.view(num_rois, channels,
mask_height, mask_width)
return refined_mask_pred
def _get_fine_grained_point_feats(self, x: Tuple[Tensor], rois: Tensor,
rel_roi_points: Tensor) -> Tensor:
"""Sample fine grained feats from each level feature map and
concatenate them together.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
rois (Tensor): shape (num_rois, 5).
rel_roi_points (Tensor): A tensor of shape (num_rois, num_points,
2) that contains [0, 1] x [0, 1] normalized coordinates of the
most uncertain points from the [mask_height, mask_width] grid.
Returns:
Tensor: The fine grained features for each points,
has shape (num_rois, feats_channels, num_points).
"""
        assert rois.shape[0] > 0, 'RoI is an empty tensor.'
num_imgs = x[0].shape[0]
fine_grained_feats = []
for idx in range(self.mask_roi_extractor.num_inputs):
feats = x[idx]
spatial_scale = 1. / float(
self.mask_roi_extractor.featmap_strides[idx])
point_feats = []
for batch_ind in range(num_imgs):
# unravel batch dim
feat = feats[batch_ind].unsqueeze(0)
inds = (rois[:, 0].long() == batch_ind)
if inds.any():
rel_img_points = rel_roi_point_to_rel_img_point(
rois=rois[inds],
rel_roi_points=rel_roi_points[inds],
img=feat.shape[2:],
spatial_scale=spatial_scale).unsqueeze(0)
point_feat = point_sample(feat, rel_img_points)
point_feat = point_feat.squeeze(0).transpose(0, 1)
point_feats.append(point_feat)
fine_grained_feats.append(torch.cat(point_feats, dim=0))
return torch.cat(fine_grained_feats, dim=1)
def predict_mask(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
results_list: InstanceList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the mask head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
                (num_instances, )
            - labels (Tensor): Labels of bboxes, has a shape
                (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
# don't need to consider aug_test.
bboxes = [res.bboxes for res in results_list]
mask_rois = bbox2roi(bboxes)
if mask_rois.shape[0] == 0:
results_list = empty_instances(
batch_img_metas,
mask_rois.device,
task_type='mask',
instance_results=results_list,
mask_thr_binary=self.test_cfg.mask_thr_binary)
return results_list
mask_results = self._mask_forward(x, mask_rois)
mask_preds = mask_results['mask_preds']
# split batch mask prediction back to each image
num_mask_rois_per_img = [len(res) for res in results_list]
mask_preds = mask_preds.split(num_mask_rois_per_img, 0)
# refine mask_preds
mask_rois = mask_rois.split(num_mask_rois_per_img, 0)
mask_preds_refined = []
for i in range(len(batch_img_metas)):
labels = results_list[i].labels
x_i = [xx[[i]] for xx in x]
mask_rois_i = mask_rois[i]
mask_rois_i[:, 0] = 0
mask_pred_i = self._mask_point_forward_test(
x_i, mask_rois_i, labels, mask_preds[i])
mask_preds_refined.append(mask_pred_i)
# TODO: Handle the case where rescale is false
results_list = self.mask_head.predict_by_feat(
mask_preds=mask_preds_refined,
results_list=results_list,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=self.test_cfg,
rescale=rescale)
return results_list
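# --- Illustrative sketch (not part of the original file) ---------------------
# A simplified, self-contained version of one subdivision step from
# `_mask_point_forward_test`: upsample the coarse mask logits, pick the most
# uncertain points (here: logits closest to the decision boundary), and
# overwrite them via scatter_. The "refined" values are random placeholders
# standing in for the point head output.
if __name__ == '__main__':
    import torch
    import torch.nn.functional as F

    num_rois, channels, size, num_points = 2, 1, 7, 16
    coarse = torch.randn(num_rois, channels, size, size)

    refined = F.interpolate(
        coarse, scale_factor=2, mode='bilinear', align_corners=False)
    n, c, h, w = refined.shape
    flat = refined.reshape(n, c, h * w)
    # uncertainty: logits closest to zero are the most uncertain points
    point_indices = (-flat.abs()).topk(num_points, dim=2).indices
    point_preds = torch.randn(n, c, num_points)  # stand-in for the point head
    flat = flat.scatter_(2, point_indices, point_preds)
    refined = flat.view(n, c, h, w)
    print(refined.shape)  # torch.Size([2, 1, 14, 14])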
| 10,479 | 43.219409 | 101 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/base_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList):
"""Perform forward propagation and loss calculation of the roi head on
the features of the upstream network."""
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results to
                the original image. Defaults to False.
Returns:
            list[:obj:`InstanceData`]: Detection results of each image.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
                (num_instances, )
            - labels (Tensor): Labels of bboxes, has a shape
                (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
        # TODO: the nms op in mmcv needs to be enhanced; the bbox results may
        # differ when rescale is not applied in bbox_head
# If it has the mask branch, the bbox branch does not need
# to be scaled to the original image scale, because the mask
# branch will scale both bbox and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.predict_bbox(
x,
batch_img_metas,
rpn_results_list,
rcnn_test_cfg=self.test_cfg,
rescale=bbox_rescale)
if self.with_mask:
results_list = self.predict_mask(
x, batch_img_metas, results_list, rescale=rescale)
return results_list
| 4,889 | 36.615385 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_scoring_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import ConfigType, InstanceList
from ..task_modules.samplers import SamplingResult
from ..utils.misc import empty_instances
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class MaskScoringRoIHead(StandardRoIHead):
"""Mask Scoring RoIHead for `Mask Scoring RCNN.
<https://arxiv.org/abs/1903.00241>`_.
Args:
mask_iou_head (:obj`ConfigDict`, dict): The config of mask_iou_head.
"""
def __init__(self, mask_iou_head: ConfigType, **kwargs):
assert mask_iou_head is not None
super().__init__(**kwargs)
self.mask_iou_head = MODELS.build(mask_iou_head)
def forward(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList = None) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
x (List[Tensor]): Multi-level features that may have different
resolutions.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
        Returns:
tuple: A tuple of features from ``bbox_head`` and ``mask_head``
forward.
"""
results = ()
proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]
rois = bbox2roi(proposals)
# bbox head
if self.with_bbox:
bbox_results = self._bbox_forward(x, rois)
results = results + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask head
if self.with_mask:
mask_rois = rois[:100]
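            # only the first 100 RoIs are fed to the mask branch in this
            # post-processing-free forward, which keeps the pass cheap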
mask_results = self._mask_forward(x, mask_rois)
results = results + (mask_results['mask_preds'], )
# mask iou head
cls_score = bbox_results['cls_score'][:100]
mask_preds = mask_results['mask_preds']
mask_feats = mask_results['mask_feats']
_, labels = cls_score[:, :self.bbox_head.num_classes].max(dim=1)
mask_iou_preds = self.mask_iou_head(
mask_feats, mask_preds[range(labels.size(0)), labels])
results = results + (mask_iou_preds, )
return results
def mask_loss(self, x: Tuple[Tensor],
sampling_results: List[SamplingResult], bbox_feats,
batch_gt_instances: InstanceList) -> dict:
"""Perform forward propagation and loss calculation of the mask head on
the features of the upstream network.
Args:
x (tuple[Tensor]): Tuple of multi-level img features.
sampling_results (list["obj:`SamplingResult`]): Sampling results.
bbox_feats (Tensor): Extract bbox RoI features.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
- `mask_feats` (Tensor): Extract mask RoI features.
- `mask_targets` (Tensor): Mask target of each positive\
proposals in the image.
- `loss_mask` (dict): A dictionary of mask loss components.
- `loss_mask_iou` (Tensor): mask iou loss.
"""
if not self.share_roi_extractor:
pos_rois = bbox2roi([res.pos_priors for res in sampling_results])
mask_results = self._mask_forward(x, pos_rois)
else:
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_priors.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_priors.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_results = self._mask_forward(
x, pos_inds=pos_inds, bbox_feats=bbox_feats)
mask_loss_and_target = self.mask_head.loss_and_target(
mask_preds=mask_results['mask_preds'],
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
rcnn_train_cfg=self.train_cfg)
mask_targets = mask_loss_and_target['mask_targets']
mask_results.update(loss_mask=mask_loss_and_target['loss_mask'])
if mask_results['loss_mask'] is None:
return mask_results
# mask iou head forward and loss
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
pos_mask_pred = mask_results['mask_preds'][
range(mask_results['mask_preds'].size(0)), pos_labels]
mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],
pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
pos_labels]
loss_mask_iou = self.mask_iou_head.loss_and_target(
pos_mask_iou_pred, pos_mask_pred, mask_targets, sampling_results,
batch_gt_instances, self.train_cfg)
mask_results['loss_mask'].update(loss_mask_iou)
return mask_results
def predict_mask(self,
x: Tensor,
batch_img_metas: List[dict],
results_list: InstanceList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the mask head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
                (num_instances, )
            - labels (Tensor): Labels of bboxes, has a shape
                (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
bboxes = [res.bboxes for res in results_list]
mask_rois = bbox2roi(bboxes)
if mask_rois.shape[0] == 0:
results_list = empty_instances(
batch_img_metas,
mask_rois.device,
task_type='mask',
instance_results=results_list,
mask_thr_binary=self.test_cfg.mask_thr_binary)
return results_list
mask_results = self._mask_forward(x, mask_rois)
mask_preds = mask_results['mask_preds']
mask_feats = mask_results['mask_feats']
# get mask scores with mask iou head
labels = torch.cat([res.labels for res in results_list])
mask_iou_preds = self.mask_iou_head(
mask_feats, mask_preds[range(labels.size(0)), labels])
# split batch mask prediction back to each image
num_mask_rois_per_img = [len(res) for res in results_list]
mask_preds = mask_preds.split(num_mask_rois_per_img, 0)
mask_iou_preds = mask_iou_preds.split(num_mask_rois_per_img, 0)
# TODO: Handle the case where rescale is false
results_list = self.mask_head.predict_by_feat(
mask_preds=mask_preds,
results_list=results_list,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=self.test_cfg,
rescale=rescale)
results_list = self.mask_iou_head.predict_by_feat(
mask_iou_preds=mask_iou_preds, results_list=results_list)
return results_list
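# --- Illustrative sketch (not part of the original file) ---------------------
# The core idea of Mask Scoring R-CNN in a few lines: the final mask score is
# the classification score multiplied by the predicted mask IoU of the
# corresponding class. The tensors below are random placeholders.
if __name__ == '__main__':
    import torch

    num_dets, num_classes = 4, 3
    cls_scores = torch.rand(num_dets)                    # score of the kept class
    labels = torch.randint(0, num_classes, (num_dets, ))
    mask_iou_preds = torch.rand(num_dets, num_classes)   # per-class IoU estimate
    inds = torch.arange(num_dets)
    mask_scores = cls_scores * mask_iou_preds[inds, labels]
    print(mask_scores)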
| 8,784 | 41.033493 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_roi_head import BaseRoIHead
from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,
DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,
Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)
from .cascade_roi_head import CascadeRoIHead
from .double_roi_head import DoubleHeadRoIHead
from .dynamic_roi_head import DynamicRoIHead
from .grid_roi_head import GridRoIHead
from .htc_roi_head import HybridTaskCascadeRoIHead
from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,
FusedSemanticHead, GlobalContextHead, GridHead,
HTCMaskHead, MaskIoUHead, MaskPointHead,
SCNetMaskHead, SCNetSemanticHead)
from .mask_scoring_roi_head import MaskScoringRoIHead
from .multi_instance_roi_head import MultiInstanceRoIHead
from .pisa_roi_head import PISARoIHead
from .point_rend_roi_head import PointRendRoIHead
from .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor,
SingleRoIExtractor)
from .scnet_roi_head import SCNetRoIHead
from .shared_heads import ResLayer
from .sparse_roi_head import SparseRoIHead
from .standard_roi_head import StandardRoIHead
from .trident_roi_head import TridentRoIHead
__all__ = [
'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead',
'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead',
'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead',
'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead',
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor',
'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',
'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',
'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',
'FeatureRelayHead', 'GlobalContextHead', 'MultiInstanceRoIHead'
]
| 2,043 | 51.410256 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/htc_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from mmdet.models.test_time_augs import merge_aug_masks
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import InstanceList, OptConfigType
from ..layers import adaptive_avg_pool2d
from ..task_modules.samplers import SamplingResult
from ..utils import empty_instances, unpack_gt_instances
from .cascade_roi_head import CascadeRoIHead
@MODELS.register_module()
class HybridTaskCascadeRoIHead(CascadeRoIHead):
"""Hybrid task cascade roi head including one bbox head and one mask head.
https://arxiv.org/abs/1901.07518
Args:
num_stages (int): Number of cascade stages.
stage_loss_weights (list[float]): Loss weight for every stage.
semantic_roi_extractor (:obj:`ConfigDict` or dict, optional):
Config of semantic roi extractor. Defaults to None.
        semantic_head (:obj:`ConfigDict` or dict, optional):
            Config of semantic head. Defaults to None.
        interleaved (bool): Whether to interleave the box branch and the mask
            branch. If True, the mask branch can take the refined bounding
            box predictions. Defaults to True.
        mask_info_flow (bool): Whether to turn on the mask information flow,
            which means feeding the mask features of the preceding stage
to the current stage. Defaults to True.
"""
def __init__(self,
num_stages: int,
stage_loss_weights: List[float],
semantic_roi_extractor: OptConfigType = None,
semantic_head: OptConfigType = None,
semantic_fusion: Tuple[str] = ('bbox', 'mask'),
interleaved: bool = True,
mask_info_flow: bool = True,
**kwargs) -> None:
super().__init__(
num_stages=num_stages,
stage_loss_weights=stage_loss_weights,
**kwargs)
assert self.with_bbox
assert not self.with_shared_head # shared head is not supported
if semantic_head is not None:
self.semantic_roi_extractor = MODELS.build(semantic_roi_extractor)
self.semantic_head = MODELS.build(semantic_head)
self.semantic_fusion = semantic_fusion
self.interleaved = interleaved
self.mask_info_flow = mask_info_flow
# TODO move to base_roi_head later
@property
def with_semantic(self) -> bool:
"""bool: whether the head has semantic head"""
return hasattr(self,
'semantic_head') and self.semantic_head is not None
def _bbox_forward(
self,
stage: int,
x: Tuple[Tensor],
rois: Tensor,
semantic_feat: Optional[Tensor] = None) -> Dict[str, Tensor]:
"""Box head forward function used in both training and testing.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
semantic_feat (Tensor, optional): Semantic feature. Defaults to
None.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
"""
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
if self.with_semantic and 'bbox' in self.semantic_fusion:
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
bbox_semantic_feat = adaptive_avg_pool2d(
bbox_semantic_feat, bbox_feats.shape[-2:])
bbox_feats += bbox_semantic_feat
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
return bbox_results
def bbox_loss(self,
stage: int,
x: Tuple[Tensor],
sampling_results: List[SamplingResult],
semantic_feat: Optional[Tensor] = None) -> dict:
"""Run forward function and calculate loss for box head in training.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): List of multi-level img features.
sampling_results (list["obj:`SamplingResult`]): Sampling results.
semantic_feat (Tensor, optional): Semantic feature. Defaults to
None.
Returns:
dict: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
- `rois` (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
- `bbox_targets` (tuple): Ground truth for proposals in a
single image. Containing the following list of Tensors:
(labels, label_weights, bbox_targets, bbox_weights)
"""
bbox_head = self.bbox_head[stage]
rois = bbox2roi([res.priors for res in sampling_results])
bbox_results = self._bbox_forward(
stage, x, rois, semantic_feat=semantic_feat)
bbox_results.update(rois=rois)
bbox_loss_and_target = bbox_head.loss_and_target(
cls_score=bbox_results['cls_score'],
bbox_pred=bbox_results['bbox_pred'],
rois=rois,
sampling_results=sampling_results,
rcnn_train_cfg=self.train_cfg[stage])
bbox_results.update(bbox_loss_and_target)
return bbox_results
def _mask_forward(self,
stage: int,
x: Tuple[Tensor],
rois: Tensor,
semantic_feat: Optional[Tensor] = None,
training: bool = True) -> Dict[str, Tensor]:
"""Mask head forward function used only in training.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): Tuple of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
semantic_feat (Tensor, optional): Semantic feature. Defaults to
None.
training (bool): Mask Forward is different between training and
testing. If True, use the mask forward in training.
Defaults to True.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
"""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
rois)
# semantic feature fusion
# element-wise sum for original features and pooled semantic features
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats = mask_feats + mask_semantic_feat
# mask information flow
# forward all previous mask heads to obtain last_feat, and fuse it
# with the normal mask feature
if training:
if self.mask_info_flow:
last_feat = None
for i in range(stage):
last_feat = self.mask_head[i](
mask_feats, last_feat, return_logits=False)
mask_preds = mask_head(
mask_feats, last_feat, return_feat=False)
else:
mask_preds = mask_head(mask_feats, return_feat=False)
mask_results = dict(mask_preds=mask_preds)
else:
aug_masks = []
last_feat = None
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.mask_info_flow:
mask_preds, last_feat = mask_head(mask_feats, last_feat)
else:
mask_preds = mask_head(mask_feats)
aug_masks.append(mask_preds)
mask_results = dict(mask_preds=aug_masks)
return mask_results
def mask_loss(self,
stage: int,
x: Tuple[Tensor],
sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList,
semantic_feat: Optional[Tensor] = None) -> dict:
"""Run forward function and calculate loss for mask head in training.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): Tuple of multi-level img features.
sampling_results (list["obj:`SamplingResult`]): Sampling results.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
semantic_feat (Tensor, optional): Semantic feature. Defaults to
None.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
- `loss_mask` (dict): A dictionary of mask loss components.
"""
pos_rois = bbox2roi([res.pos_priors for res in sampling_results])
mask_results = self._mask_forward(
stage=stage,
x=x,
rois=pos_rois,
semantic_feat=semantic_feat,
training=True)
mask_head = self.mask_head[stage]
mask_loss_and_target = mask_head.loss_and_target(
mask_preds=mask_results['mask_preds'],
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
rcnn_train_cfg=self.train_cfg[stage])
mask_results.update(mask_loss_and_target)
return mask_results
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components
"""
assert len(rpn_results_list) == len(batch_data_samples)
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# semantic segmentation part
# 2 outputs: segmentation prediction and embedded features
losses = dict()
if self.with_semantic:
gt_semantic_segs = [
data_sample.gt_sem_seg.sem_seg
for data_sample in batch_data_samples
]
gt_semantic_segs = torch.stack(gt_semantic_segs)
semantic_pred, semantic_feat = self.semantic_head(x)
loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_segs)
losses['loss_semantic_seg'] = loss_seg
else:
semantic_feat = None
results_list = rpn_results_list
num_imgs = len(batch_img_metas)
for stage in range(self.num_stages):
self.current_stage = stage
stage_loss_weight = self.stage_loss_weights[stage]
# assign gts and sample proposals
sampling_results = []
bbox_assigner = self.bbox_assigner[stage]
bbox_sampler = self.bbox_sampler[stage]
for i in range(num_imgs):
results = results_list[i]
# rename rpn_results.bboxes to rpn_results.priors
if 'bboxes' in results:
results.priors = results.pop('bboxes')
assign_result = bbox_assigner.assign(
results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = bbox_sampler.sample(
assign_result,
results,
batch_gt_instances[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_results = self.bbox_loss(
stage=stage,
x=x,
sampling_results=sampling_results,
semantic_feat=semantic_feat)
for name, value in bbox_results['loss_bbox'].items():
losses[f's{stage}.{name}'] = (
value * stage_loss_weight if 'loss' in name else value)
# mask head forward and loss
if self.with_mask:
# interleaved execution: use regressed bboxes by the box branch
# to train the mask branch
if self.interleaved:
bbox_head = self.bbox_head[stage]
with torch.no_grad():
results_list = bbox_head.refine_bboxes(
sampling_results, bbox_results, batch_img_metas)
                    # re-assign gts and re-sample RoIs based on the refined
                    # bboxes (the number of sampled RoIs stays the same)
sampling_results = []
for i in range(num_imgs):
results = results_list[i]
# rename rpn_results.bboxes to rpn_results.priors
results.priors = results.pop('bboxes')
assign_result = bbox_assigner.assign(
results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = bbox_sampler.sample(
assign_result,
results,
batch_gt_instances[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
mask_results = self.mask_loss(
stage=stage,
x=x,
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
semantic_feat=semantic_feat)
for name, value in mask_results['loss_mask'].items():
losses[f's{stage}.{name}'] = (
value * stage_loss_weight if 'loss' in name else value)
# refine bboxes (same as Cascade R-CNN)
if stage < self.num_stages - 1 and not self.interleaved:
bbox_head = self.bbox_head[stage]
with torch.no_grad():
results_list = bbox_head.refine_bboxes(
sampling_results=sampling_results,
bbox_results=bbox_results,
batch_img_metas=batch_img_metas)
return losses
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results to
the original image. Defaults to False.
Returns:
            list[:obj:`InstanceData`]: Detection results of each image.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
                (num_instances, )
            - labels (Tensor): Labels of bboxes, has a shape
                (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
        # TODO: the nms op in mmcv needs to be enhanced; the bbox results may
        # differ when rescale is not applied in bbox_head
# If it has the mask branch, the bbox branch does not need
# to be scaled to the original image scale, because the mask
# branch will scale both bbox and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.predict_bbox(
x=x,
semantic_feat=semantic_feat,
batch_img_metas=batch_img_metas,
rpn_results_list=rpn_results_list,
rcnn_test_cfg=self.test_cfg,
rescale=bbox_rescale)
if self.with_mask:
results_list = self.predict_mask(
x=x,
                semantic_feat=semantic_feat,
batch_img_metas=batch_img_metas,
results_list=results_list,
rescale=rescale)
return results_list
def predict_mask(self,
x: Tuple[Tensor],
                     semantic_feat: Tensor,
batch_img_metas: List[dict],
results_list: InstanceList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the mask head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
semantic_feat (Tensor): Semantic feature.
batch_img_metas (list[dict]): List of image information.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
                (num_instances, )
            - labels (Tensor): Labels of bboxes, has a shape
                (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
num_imgs = len(batch_img_metas)
bboxes = [res.bboxes for res in results_list]
mask_rois = bbox2roi(bboxes)
if mask_rois.shape[0] == 0:
results_list = empty_instances(
batch_img_metas=batch_img_metas,
device=mask_rois.device,
task_type='mask',
instance_results=results_list,
mask_thr_binary=self.test_cfg.mask_thr_binary)
return results_list
num_mask_rois_per_img = [len(res) for res in results_list]
mask_results = self._mask_forward(
stage=-1,
x=x,
rois=mask_rois,
            semantic_feat=semantic_feat,
training=False)
# split batch mask prediction back to each image
aug_masks = [[
mask.sigmoid().detach()
for mask in mask_preds.split(num_mask_rois_per_img, 0)
] for mask_preds in mask_results['mask_preds']]
merged_masks = []
for i in range(num_imgs):
aug_mask = [mask[i] for mask in aug_masks]
merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])
merged_masks.append(merged_mask)
results_list = self.mask_head[-1].predict_by_feat(
mask_preds=merged_masks,
results_list=results_list,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=self.test_cfg,
rescale=rescale,
activate_map=True)
return results_list
def forward(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
x (List[Tensor]): Multi-level features that may have different
resolutions.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
        Returns:
tuple: A tuple of features from ``bbox_head`` and ``mask_head``
forward.
"""
results = ()
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
num_imgs = len(batch_img_metas)
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
proposals = [rpn_results.bboxes for rpn_results in rpn_results_list]
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = bbox2roi(proposals)
# bbox head
if self.with_bbox:
rois, cls_scores, bbox_preds = self._refine_roi(
x=x,
rois=rois,
semantic_feat=semantic_feat,
batch_img_metas=batch_img_metas,
num_proposals_per_img=num_proposals_per_img)
results = results + (cls_scores, bbox_preds)
# mask head
if self.with_mask:
rois = torch.cat(rois)
mask_results = self._mask_forward(
stage=-1,
x=x,
rois=rois,
semantic_feat=semantic_feat,
training=False)
aug_masks = [[
mask.sigmoid().detach()
for mask in mask_preds.split(num_proposals_per_img, 0)
] for mask_preds in mask_results['mask_preds']]
merged_masks = []
for i in range(num_imgs):
aug_mask = [mask[i] for mask in aug_masks]
merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])
merged_masks.append(merged_mask)
results = results + (merged_masks, )
return results
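# --- Editor's note: illustrative sketch, not part of the upstream file. ---
# Both ``predict_mask`` and ``forward`` above run the mask head on the RoIs
# of the whole batch at once and then split the stacked output back into
# per-image chunks with ``Tensor.split``. The tiny self-contained helper
# below (hypothetical name, made-up shapes) shows that splitting pattern in
# isolation.
def _demo_split_batched_mask_preds():
    import torch

    # pretend the head predicted 28x28 masks for 5 RoIs of image 0 and
    # 3 RoIs of image 1, stacked along dim 0
    batched_mask_preds = torch.rand(8, 1, 28, 28)
    num_mask_rois_per_img = [5, 3]

    per_image_preds = batched_mask_preds.split(num_mask_rois_per_img, 0)
    assert [p.shape[0] for p in per_image_preds] == num_mask_rois_per_img
    return per_image_preds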
# ===== ERD-main/mmdet/models/roi_heads/pisa_roi_head.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
from torch import Tensor
from mmdet.models.task_modules import SamplingResult
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import InstanceList
from ..losses.pisa_loss import carl_loss, isr_p
from ..utils import unpack_gt_instances
from .standard_roi_head import StandardRoIHead
@MODELS.register_module()
class PISARoIHead(StandardRoIHead):
r"""The RoI head for `Prime Sample Attention in Object Detection
<https://arxiv.org/abs/1904.04821>`_."""
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: List[DetDataSample]) -> dict:
"""Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components
"""
assert len(rpn_results_list) == len(batch_data_samples)
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, _ = outputs
# assign gts and sample proposals
num_imgs = len(batch_data_samples)
sampling_results = []
neg_label_weights = []
for i in range(num_imgs):
# rename rpn_results.bboxes to rpn_results.priors
rpn_results = rpn_results_list[i]
rpn_results.priors = rpn_results.pop('bboxes')
assign_result = self.bbox_assigner.assign(
rpn_results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
rpn_results,
batch_gt_instances[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
            # ISR-N samplers return (sampling_result, neg_label_weight);
            # default to None so the name is defined for other samplers
            neg_label_weight = None
            if isinstance(sampling_result, tuple):
                sampling_result, neg_label_weight = sampling_result
            sampling_results.append(sampling_result)
            neg_label_weights.append(neg_label_weight)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self.bbox_loss(
x, sampling_results, neg_label_weights=neg_label_weights)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self.mask_loss(x, sampling_results,
bbox_results['bbox_feats'],
batch_gt_instances)
losses.update(mask_results['loss_mask'])
return losses
def bbox_loss(self,
x: Tuple[Tensor],
sampling_results: List[SamplingResult],
neg_label_weights: List[Tensor] = None) -> dict:
"""Perform forward propagation and loss calculation of the bbox head on
the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
sampling_results (list["obj:`SamplingResult`]): Sampling results.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
- `loss_bbox` (dict): A dictionary of bbox loss components.
"""
rois = bbox2roi([res.priors for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
bbox_targets = self.bbox_head.get_targets(sampling_results,
self.train_cfg)
        # neg_label_weights obtained by the sampler are image-wise; map them
        # back to the corresponding locations in the label weights
if neg_label_weights[0] is not None:
label_weights = bbox_targets[1]
cur_num_rois = 0
for i in range(len(sampling_results)):
num_pos = sampling_results[i].pos_inds.size(0)
num_neg = sampling_results[i].neg_inds.size(0)
label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +
num_neg] = neg_label_weights[i]
cur_num_rois += num_pos + num_neg
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Apply ISR-P
isr_cfg = self.train_cfg.get('isr', None)
if isr_cfg is not None:
bbox_targets = isr_p(
cls_score,
bbox_pred,
bbox_targets,
rois,
sampling_results,
self.bbox_head.loss_cls,
self.bbox_head.bbox_coder,
**isr_cfg,
num_class=self.bbox_head.num_classes)
loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
*bbox_targets)
# Add CARL Loss
carl_cfg = self.train_cfg.get('carl', None)
if carl_cfg is not None:
loss_carl = carl_loss(
cls_score,
bbox_targets[0],
bbox_pred,
bbox_targets[2],
self.bbox_head.loss_bbox,
**carl_cfg,
num_class=self.bbox_head.num_classes)
loss_bbox.update(loss_carl)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
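# --- Editor's note: illustrative sketch, not part of the upstream file. ---
# ``bbox_loss`` above only applies ISR-P and CARL when the RCNN ``train_cfg``
# carries ``isr`` / ``carl`` entries. A minimal sketch of such a sub-config is
# given below; the variable name and the hyper-parameter values are
# illustrative assumptions, not reference settings.
_example_pisa_rcnn_train_cfg_extra = dict(
    isr=dict(k=2, bias=0),      # keyword arguments forwarded to ``isr_p``
    carl=dict(k=1, bias=0.2),   # keyword arguments forwarded to ``carl_loss``
)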
# ===== ERD-main/mmdet/models/roi_heads/test_mixins.py =====
# Copyright (c) OpenMMLab. All rights reserved.
# TODO: delete this file after refactor
import sys
import torch
from mmdet.models.layers import multiclass_nms
from mmdet.models.test_time_augs import merge_aug_bboxes, merge_aug_masks
from mmdet.structures.bbox import bbox2roi, bbox_mapping
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import completed
class BBoxTestMixin:
if sys.version_info >= (3, 7):
# TODO: Currently not supported
async def async_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False,
**kwargs):
"""Asynchronized test for box head without augmentation."""
rois = bbox2roi(proposals)
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)
async with completed(
__name__, 'bbox_head_forward',
sleep_interval=sleep_interval):
cls_score, bbox_pred = self.bbox_head(roi_feats)
img_shape = img_metas[0]['img_shape']
scale_factor = img_metas[0]['scale_factor']
det_bboxes, det_labels = self.bbox_head.get_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
return det_bboxes, det_labels
# TODO: Currently not supported
def aug_test_bboxes(self, feats, img_metas, rpn_results_list,
rcnn_test_cfg):
"""Test det bboxes with test time augmentation."""
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
# TODO more flexible
proposals = bbox_mapping(rpn_results_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
rois = bbox2roi([proposals])
bbox_results = self.bbox_forward(x, rois)
bboxes, scores = self.bbox_head.get_bboxes(
rois,
bbox_results['cls_score'],
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
if merged_bboxes.shape[0] == 0:
# There is no proposal in the single image
det_bboxes = merged_bboxes.new_zeros(0, 5)
det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long)
else:
det_bboxes, det_labels = multiclass_nms(merged_bboxes,
merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
return det_bboxes, det_labels
class MaskTestMixin:
if sys.version_info >= (3, 7):
# TODO: Currently not supported
async def async_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False,
mask_test_cfg=None):
"""Asynchronized test for mask head without augmentation."""
# image shape of the first image in the batch (only one)
ori_shape = img_metas[0]['ori_shape']
scale_factor = img_metas[0]['scale_factor']
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes)]
else:
if rescale and not isinstance(scale_factor,
(float, torch.Tensor)):
scale_factor = det_bboxes.new_tensor(scale_factor)
_bboxes = (
det_bboxes[:, :4] *
scale_factor if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)],
mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
if mask_test_cfg and \
mask_test_cfg.get('async_sleep_interval'):
sleep_interval = mask_test_cfg['async_sleep_interval']
else:
sleep_interval = 0.035
async with completed(
__name__,
'mask_head_forward',
sleep_interval=sleep_interval):
mask_pred = self.mask_head(mask_feats)
segm_result = self.mask_head.get_results(
mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,
scale_factor, rescale)
return segm_result
# TODO: Currently not supported
def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
"""Test for mask head with test time augmentation."""
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes)]
else:
aug_masks = []
for x, img_meta in zip(feats, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip, flip_direction)
mask_rois = bbox2roi([_bboxes])
mask_results = self._mask_forward(x, mask_rois)
# convert to numpy array to save memory
aug_masks.append(
mask_results['mask_pred'].sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
scale_factor = det_bboxes.new_ones(4)
segm_result = self.mask_head.get_results(
merged_masks,
det_bboxes,
det_labels,
self.test_cfg,
ori_shape,
scale_factor=scale_factor,
rescale=False)
return segm_result
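# --- Editor's note: illustrative sketch, not part of the upstream file. ---
# ``bbox_mapping`` used above maps proposals into the coordinate frame of an
# augmented (e.g. horizontally flipped) image before running the head. The
# standalone helper below (hypothetical name, made-up box) reproduces only
# the horizontal-flip part of that mapping with plain tensor ops.
def _demo_hflip_boxes():
    import torch

    img_w = 100.
    boxes = torch.tensor([[10., 20., 40., 60.]])  # (x1, y1, x2, y2)
    flipped = boxes.clone()
    flipped[:, 0] = img_w - boxes[:, 2]  # new x1 = W - old x2
    flipped[:, 2] = img_w - boxes[:, 0]  # new x2 = W - old x1
    return flipped  # tensor([[60., 20., 90., 60.]])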
# ===== ERD-main/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from mmcv import ops
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.utils import ConfigType, OptMultiConfig
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (list[int]): Strides of input feature maps.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
@property
def num_inputs(self) -> int:
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg: ConfigType,
featmap_strides: List[int]) -> nn.ModuleList:
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
config RoI layer operation. Options are modules under
``mmcv/ops`` such as ``RoIAlign``.
featmap_strides (list[int]): The stride of input feature map w.r.t
to the original image size, which would be used to scale RoI
coordinate (original image coordinate system) to feature
coordinate system.
Returns:
:obj:`nn.ModuleList`: The RoI extractor modules for each level
feature map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois: Tensor, scale_factor: float) -> Tensor:
"""Scale RoI coordinates by scale factor.
Args:
rois (Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None) -> Tensor:
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
pass
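# --- Editor's note: illustrative sketch, not part of the upstream file. ---
# ``roi_rescale`` above grows or shrinks each RoI around its own centre while
# leaving the batch index in column 0 untouched. The helper below
# (hypothetical name, made-up numbers) works through that arithmetic once.
def _demo_roi_rescale_arithmetic():
    import torch

    roi = torch.tensor([[0., 10., 10., 30., 50.]])  # (batch_idx, x1, y1, x2, y2)
    # the box is 20 x 40 with centre (20, 30); scaling by 2 doubles both sides
    cx, cy, new_w, new_h = 20., 30., 40., 80.
    expected = torch.tensor(
        [[cx - new_w * 0.5, cy - new_h * 0.5,
          cx + new_w * 0.5, cy + new_h * 0.5]])  # -> (0, -10, 40, 70)
    assert torch.allclose(expected, torch.tensor([[0., -10., 40., 70.]]))
    return torch.cat([roi[:, :1], expected], dim=-1)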
# ===== ERD-main/mmdet/models/roi_heads/roi_extractors/__init__.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from .base_roi_extractor import BaseRoIExtractor
from .generic_roi_extractor import GenericRoIExtractor
from .single_level_roi_extractor import SingleRoIExtractor
__all__ = ['BaseRoIExtractor', 'SingleRoIExtractor', 'GenericRoIExtractor']
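# --- Editor's note: illustrative sketch, not part of the upstream file. ---
# These extractors are normally built from a config dict through the MODELS
# registry rather than imported directly; the dict below is an illustrative
# example for the single-level extractor (names and values are assumptions).
_example_single_roi_extractor_cfg = dict(
    type='SingleRoIExtractor',
    roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
    out_channels=256,
    featmap_strides=[4, 8, 16, 32])
# e.g. ``extractor = mmdet.registry.MODELS.build(_example_single_roi_extractor_cfg)``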
# ===== ERD-main/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0.
Defaults to 56.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
finest_scale: int = 56,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
roi_layer=roi_layer,
out_channels=out_channels,
featmap_strides=featmap_strides,
init_cfg=init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois: Tensor, num_levels: int) -> Tensor:
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None):
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
# convert fp32 to fp16 when amp is on
rois = rois.type_as(feats[0])
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
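# --- Editor's note: illustrative sketch, not part of the upstream file. ---
# A worked example of the level-mapping rule in ``map_roi_levels``: with the
# default ``finest_scale=56``, a RoI whose sqrt-area is 112 lands on level 1
# because floor(log2(112 / 56)) == 1. The helper below (hypothetical name,
# made-up boxes) reproduces just that formula with plain torch.
def _demo_map_roi_levels_rule():
    import torch

    finest_scale, num_levels = 56, 4
    rois = torch.tensor([
        [0., 0., 0., 40., 40.],    # scale 40  -> level 0 (clamped)
        [0., 0., 0., 112., 112.],  # scale 112 -> level 1
        [0., 0., 0., 500., 500.],  # scale 500 -> level 3 (clamped)
    ])
    scale = torch.sqrt((rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
    target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
    target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
    assert target_lvls.tolist() == [0, 1, 3]
    return target_lvls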
# ===== ERD-main/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple
from mmcv.cnn.bricks import build_plugin_layer
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Defaults to 'sum'.
pre_cfg (:obj:`ConfigDict` or dict): Specify pre-processing modules.
Defaults to None.
post_cfg (:obj:`ConfigDict` or dict): Specify post-processing modules.
Defaults to None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation: str = 'sum',
pre_cfg: OptConfigType = None,
post_cfg: OptConfigType = None,
**kwargs) -> None:
super().__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None) -> Tensor:
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# some times rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if num_levels == 1:
return self.roi_layers[0](feats[0], rois)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats += roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
# apply post-processing before return the result
roi_feats = self.post_module(roi_feats)
return roi_feats
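# --- Editor's note: illustrative sketch, not part of the upstream file. ---
# A minimal config sketch for building this extractor through the registry.
# The concrete choices below (RoIAlign, output size, strides and the optional
# ConvModule pre-processing) are illustrative assumptions, not reference
# GRoIE settings.
_example_generic_roi_extractor_cfg = dict(
    type='GenericRoIExtractor',
    aggregation='sum',
    roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
    out_channels=256,
    featmap_strides=[4, 8, 16, 32],
    # any cfg accepted by ``build_plugin_layer`` can serve as the pre/post
    # processing module applied to the per-level / aggregated RoI features
    pre_cfg=dict(
        type='ConvModule',
        in_channels=256,
        out_channels=256,
        kernel_size=5,
        padding=2,
        inplace=False))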
# ===== ERD-main/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple, Union
from torch import Tensor
from mmdet.registry import MODELS
from .convfc_bbox_head import ConvFCBBoxHead
@MODELS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
    This inherits ``ConvFCBBoxHead`` with a modified forward() function that
    allows us to get the intermediate shared feature.
"""
def _forward_shared(self, x: Tensor) -> Tensor:
"""Forward function for shared part.
Args:
x (Tensor): Input feature.
Returns:
Tensor: Shared feature.
"""
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
return x
def _forward_cls_reg(self, x: Tensor) -> Tuple[Tensor]:
"""Forward function for classification and regression parts.
Args:
x (Tensor): Input feature.
Returns:
tuple[Tensor]:
- cls_score (Tensor): classification prediction.
- bbox_pred (Tensor): bbox prediction.
"""
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
def forward(
self,
x: Tensor,
return_shared_feat: bool = False) -> Union[Tensor, Tuple[Tensor]]:
"""Forward function.
Args:
x (Tensor): input features
return_shared_feat (bool): If True, return cls-reg-shared feature.
Return:
out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``,
if ``return_shared_feat`` is True, append ``x_shared`` to the
returned tuple.
"""
x_shared = self._forward_shared(x)
out = self._forward_cls_reg(x_shared)
if return_shared_feat:
out += (x_shared, )
return out
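# --- Editor's note: illustrative sketch, not part of the upstream file. ---
# A minimal usage sketch of ``return_shared_feat``: the head is built with a
# couple of shared fcs and run on dummy RoI features. The constructor
# arguments and tensor shapes below are illustrative assumptions.
def _demo_scnet_bbox_head_shared_feat():
    import torch

    head = SCNetBBoxHead(
        num_shared_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=80)
    roi_feats = torch.rand(4, 256, 7, 7)  # 4 RoIs worth of pooled features
    cls_score, bbox_pred, shared_feat = head(roi_feats, return_shared_feat=True)
    # ``shared_feat`` can then be reused, e.g. by SCNet's feature-relay branch
    return cls_score.shape, bbox_pred.shape, shared_feat.shape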
# ===== ERD-main/mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor, nn
from mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.models.utils import empty_instances
from mmdet.registry import MODELS
from mmdet.structures.bbox import bbox_overlaps
@MODELS.register_module()
class MultiInstanceBBoxHead(BBoxHead):
r"""Bbox head used in CrowdDet.
.. code-block:: none
/-> cls convs_1 -> cls fcs_1 -> cls_1
|--
| \-> reg convs_1 -> reg fcs_1 -> reg_1
|
| /-> cls convs_2 -> cls fcs_2 -> cls_2
shared convs -> shared fcs |--
| \-> reg convs_2 -> reg fcs_2 -> reg_2
|
| ...
|
| /-> cls convs_k -> cls fcs_k -> cls_k
|--
\-> reg convs_k -> reg fcs_k -> reg_k
Args:
num_instance (int): The number of branches after shared fcs.
Defaults to 2.
with_refine (bool): Whether to use refine module. Defaults to False.
num_shared_convs (int): The number of shared convs. Defaults to 0.
num_shared_fcs (int): The number of shared fcs. Defaults to 2.
num_cls_convs (int): The number of cls convs. Defaults to 0.
num_cls_fcs (int): The number of cls fcs. Defaults to 0.
num_reg_convs (int): The number of reg convs. Defaults to 0.
num_reg_fcs (int): The number of reg fcs. Defaults to 0.
conv_out_channels (int): The number of conv out channels.
Defaults to 256.
fc_out_channels (int): The number of fc out channels. Defaults to 1024.
init_cfg (dict or list[dict], optional): Initialization config dict.
Defaults to None.
""" # noqa: W605
def __init__(self,
num_instance: int = 2,
with_refine: bool = False,
num_shared_convs: int = 0,
num_shared_fcs: int = 2,
num_cls_convs: int = 0,
num_cls_fcs: int = 0,
num_reg_convs: int = 0,
num_reg_fcs: int = 0,
conv_out_channels: int = 256,
fc_out_channels: int = 1024,
init_cfg: Optional[Union[dict, ConfigDict]] = None,
*args,
**kwargs) -> None:
super().__init__(*args, init_cfg=init_cfg, **kwargs)
assert (num_shared_convs + num_shared_fcs + num_cls_convs +
num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
assert num_instance == 2, 'Currently only 2 instances are supported'
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
if not self.with_cls:
assert num_cls_convs == 0 and num_cls_fcs == 0
if not self.with_reg:
assert num_reg_convs == 0 and num_reg_fcs == 0
self.num_instance = num_instance
self.num_shared_convs = num_shared_convs
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.with_refine = with_refine
# add shared convs and fcs
self.shared_convs, self.shared_fcs, last_layer_dim = \
self._add_conv_fc_branch(
self.num_shared_convs, self.num_shared_fcs, self.in_channels,
True)
self.shared_out_channels = last_layer_dim
self.relu = nn.ReLU(inplace=True)
if self.with_refine:
refine_model_cfg = {
'type': 'Linear',
'in_features': self.shared_out_channels + 20,
'out_features': self.shared_out_channels
}
self.shared_fcs_ref = MODELS.build(refine_model_cfg)
self.fc_cls_ref = nn.ModuleList()
self.fc_reg_ref = nn.ModuleList()
self.cls_convs = nn.ModuleList()
self.cls_fcs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.reg_fcs = nn.ModuleList()
self.cls_last_dim = list()
self.reg_last_dim = list()
self.fc_cls = nn.ModuleList()
self.fc_reg = nn.ModuleList()
for k in range(self.num_instance):
# add cls specific branch
cls_convs, cls_fcs, cls_last_dim = self._add_conv_fc_branch(
self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
self.cls_convs.append(cls_convs)
self.cls_fcs.append(cls_fcs)
self.cls_last_dim.append(cls_last_dim)
# add reg specific branch
reg_convs, reg_fcs, reg_last_dim = self._add_conv_fc_branch(
self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
self.reg_convs.append(reg_convs)
self.reg_fcs.append(reg_fcs)
self.reg_last_dim.append(reg_last_dim)
if self.num_shared_fcs == 0 and not self.with_avg_pool:
if self.num_cls_fcs == 0:
self.cls_last_dim *= self.roi_feat_area
if self.num_reg_fcs == 0:
self.reg_last_dim *= self.roi_feat_area
if self.with_cls:
if self.custom_cls_channels:
cls_channels = self.loss_cls.get_cls_channels(
self.num_classes)
else:
cls_channels = self.num_classes + 1
cls_predictor_cfg_ = self.cls_predictor_cfg.copy() # deepcopy
cls_predictor_cfg_.update(
in_features=self.cls_last_dim[k],
out_features=cls_channels)
self.fc_cls.append(MODELS.build(cls_predictor_cfg_))
if self.with_refine:
self.fc_cls_ref.append(MODELS.build(cls_predictor_cfg_))
if self.with_reg:
out_dim_reg = (4 if self.reg_class_agnostic else 4 *
self.num_classes)
reg_predictor_cfg_ = self.reg_predictor_cfg.copy()
reg_predictor_cfg_.update(
in_features=self.reg_last_dim[k], out_features=out_dim_reg)
self.fc_reg.append(MODELS.build(reg_predictor_cfg_))
if self.with_refine:
self.fc_reg_ref.append(MODELS.build(reg_predictor_cfg_))
if init_cfg is None:
# when init_cfg is None,
# It has been set to
# [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],
# [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]
# after `super(ConvFCBBoxHead, self).__init__()`
# we only need to append additional configuration
# for `shared_fcs`, `cls_fcs` and `reg_fcs`
self.init_cfg += [
dict(
type='Xavier',
distribution='uniform',
override=[
dict(name='shared_fcs'),
dict(name='cls_fcs'),
dict(name='reg_fcs')
])
]
def _add_conv_fc_branch(self,
num_branch_convs: int,
num_branch_fcs: int,
in_channels: int,
is_shared: bool = False) -> tuple:
"""Add shared or separable branch.
convs -> avg pool (optional) -> fcs
"""
last_layer_dim = in_channels
# add branch specific conv layers
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels, self.conv_out_channels, 3,
padding=1))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
branch_fcs = nn.ModuleList()
if num_branch_fcs > 0:
# for shared branch, only consider self.with_avg_pool
# for separated branches, also consider self.num_shared_fcs
if (is_shared
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim
def forward(self, x: Tuple[Tensor]) -> tuple:
"""Forward features from the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: A tuple of classification scores and bbox prediction.
- cls_score (Tensor): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_base_priors * num_classes.
- bbox_pred (Tensor): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_base_priors * 4.
- cls_score_ref (Tensor): The cls_score after refine model.
- bbox_pred_ref (Tensor): The bbox_pred after refine model.
"""
# shared part
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
x_cls = x
x_reg = x
# separate branches
cls_score = list()
bbox_pred = list()
for k in range(self.num_instance):
for conv in self.cls_convs[k]:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs[k]:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs[k]:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs[k]:
x_reg = self.relu(fc(x_reg))
cls_score.append(self.fc_cls[k](x_cls) if self.with_cls else None)
bbox_pred.append(self.fc_reg[k](x_reg) if self.with_reg else None)
if self.with_refine:
x_ref = x
cls_score_ref = list()
bbox_pred_ref = list()
for k in range(self.num_instance):
feat_ref = cls_score[k].softmax(dim=-1)
feat_ref = torch.cat((bbox_pred[k], feat_ref[:, 1][:, None]),
dim=1).repeat(1, 4)
feat_ref = torch.cat((x_ref, feat_ref), dim=1)
feat_ref = F.relu_(self.shared_fcs_ref(feat_ref))
cls_score_ref.append(self.fc_cls_ref[k](feat_ref))
bbox_pred_ref.append(self.fc_reg_ref[k](feat_ref))
cls_score = torch.cat(cls_score, dim=1)
bbox_pred = torch.cat(bbox_pred, dim=1)
cls_score_ref = torch.cat(cls_score_ref, dim=1)
bbox_pred_ref = torch.cat(bbox_pred_ref, dim=1)
return cls_score, bbox_pred, cls_score_ref, bbox_pred_ref
cls_score = torch.cat(cls_score, dim=1)
bbox_pred = torch.cat(bbox_pred, dim=1)
return cls_score, bbox_pred
def get_targets(self,
sampling_results: List[SamplingResult],
rcnn_train_cfg: ConfigDict,
concat: bool = True) -> tuple:
"""Calculate the ground truth for all samples in a batch according to
the sampling_results.
        Unlike :class:`BBoxHead`, every proposal here is matched to
        ``num_instance`` ground-truth boxes, so the targets are computed
        directly from the sampling results instead of going through
        ``_get_targets_single``.
Args:
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
concat (bool): Whether to concatenate the results of all
the images in a single batch.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following list of Tensors:
- labels (list[Tensor],Tensor): Gt_labels for all proposals in a
batch, each tensor in list has shape (num_proposals,) when
`concat=False`, otherwise just a single tensor has shape
(num_all_proposals,).
- label_weights (list[Tensor]): Labels_weights for
all proposals in a batch, each tensor in list has shape
(num_proposals,) when `concat=False`, otherwise just a single
tensor has shape (num_all_proposals,).
- bbox_targets (list[Tensor],Tensor): Regression target for all
proposals in a batch, each tensor in list has shape
(num_proposals, 4) when `concat=False`, otherwise just a single
tensor has shape (num_all_proposals, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
- bbox_weights (list[tensor],Tensor): Regression weights for
all proposals in a batch, each tensor in list has shape
(num_proposals, 4) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals, 4).
"""
labels = []
bbox_targets = []
bbox_weights = []
label_weights = []
for i in range(len(sampling_results)):
sample_bboxes = torch.cat([
sampling_results[i].pos_gt_bboxes,
sampling_results[i].neg_gt_bboxes
])
sample_priors = sampling_results[i].priors
sample_priors = sample_priors.repeat(1, self.num_instance).reshape(
-1, 4)
sample_bboxes = sample_bboxes.reshape(-1, 4)
if not self.reg_decoded_bbox:
_bbox_targets = self.bbox_coder.encode(sample_priors,
sample_bboxes)
else:
_bbox_targets = sample_priors
_bbox_targets = _bbox_targets.reshape(-1, self.num_instance * 4)
_bbox_weights = torch.ones(_bbox_targets.shape)
_labels = torch.cat([
sampling_results[i].pos_gt_labels,
sampling_results[i].neg_gt_labels
])
_labels_weights = torch.ones(_labels.shape)
bbox_targets.append(_bbox_targets)
bbox_weights.append(_bbox_weights)
labels.append(_labels)
label_weights.append(_labels_weights)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor,
labels: Tensor, label_weights: Tensor, bbox_targets: Tensor,
bbox_weights: Tensor, **kwargs) -> dict:
"""Calculate the loss based on the network predictions and targets.
Args:
cls_score (Tensor): Classification prediction results of all class,
has shape (batch_size * num_proposals_single_image,
(num_classes + 1) * k), k represents the number of prediction
boxes generated by each proposal box.
bbox_pred (Tensor): Regression prediction results, has shape
(batch_size * num_proposals_single_image, 4 * k), the last
dimension 4 represents [tl_x, tl_y, br_x, br_y].
rois (Tensor): RoIs with the shape
(batch_size * num_proposals_single_image, 5) where the first
column indicates batch id of each RoI.
labels (Tensor): Gt_labels for all proposals in a batch, has
shape (batch_size * num_proposals_single_image, k).
label_weights (Tensor): Labels_weights for all proposals in a
batch, has shape (batch_size * num_proposals_single_image, k).
bbox_targets (Tensor): Regression target for all proposals in a
batch, has shape (batch_size * num_proposals_single_image,
4 * k), the last dimension 4 represents [tl_x, tl_y, br_x,
br_y].
bbox_weights (Tensor): Regression weights for all proposals in a
batch, has shape (batch_size * num_proposals_single_image,
4 * k).
Returns:
dict: A dictionary of loss.
"""
losses = dict()
if bbox_pred.numel():
loss_0 = self.emd_loss(bbox_pred[:, 0:4], cls_score[:, 0:2],
bbox_pred[:, 4:8], cls_score[:, 2:4],
bbox_targets, labels)
loss_1 = self.emd_loss(bbox_pred[:, 4:8], cls_score[:, 2:4],
bbox_pred[:, 0:4], cls_score[:, 0:2],
bbox_targets, labels)
loss = torch.cat([loss_0, loss_1], dim=1)
_, min_indices = loss.min(dim=1)
loss_emd = loss[torch.arange(loss.shape[0]), min_indices]
loss_emd = loss_emd.mean()
else:
loss_emd = bbox_pred.sum()
losses['loss_rcnn_emd'] = loss_emd
return losses
def emd_loss(self, bbox_pred_0: Tensor, cls_score_0: Tensor,
bbox_pred_1: Tensor, cls_score_1: Tensor, targets: Tensor,
labels: Tensor) -> Tensor:
"""Calculate the emd loss.
Note:
This implementation is modified from https://github.com/Purkialo/
CrowdDet/blob/master/lib/det_oprs/loss_opr.py
Args:
bbox_pred_0 (Tensor): Part of regression prediction results, has
shape (batch_size * num_proposals_single_image, 4), the last
dimension 4 represents [tl_x, tl_y, br_x, br_y].
cls_score_0 (Tensor): Part of classification prediction results,
has shape (batch_size * num_proposals_single_image,
(num_classes + 1)), where 1 represents the background.
bbox_pred_1 (Tensor): The other part of regression prediction
results, has shape (batch_size*num_proposals_single_image, 4).
            cls_score_1 (Tensor): The other part of classification prediction
results, has shape (batch_size * num_proposals_single_image,
(num_classes + 1)).
            targets (Tensor): Regression target for all proposals in a
batch, has shape (batch_size * num_proposals_single_image,
4 * k), the last dimension 4 represents [tl_x, tl_y, br_x,
br_y], k represents the number of prediction boxes generated
by each proposal box.
labels (Tensor): Gt_labels for all proposals in a batch, has
shape (batch_size * num_proposals_single_image, k).
Returns:
torch.Tensor: The calculated loss.
"""
bbox_pred = torch.cat([bbox_pred_0, bbox_pred_1],
dim=1).reshape(-1, bbox_pred_0.shape[-1])
cls_score = torch.cat([cls_score_0, cls_score_1],
dim=1).reshape(-1, cls_score_0.shape[-1])
targets = targets.reshape(-1, 4)
labels = labels.long().flatten()
# masks
valid_masks = labels >= 0
fg_masks = labels > 0
# multiple class
bbox_pred = bbox_pred.reshape(-1, self.num_classes, 4)
fg_gt_classes = labels[fg_masks]
bbox_pred = bbox_pred[fg_masks, fg_gt_classes - 1, :]
# loss for regression
loss_bbox = self.loss_bbox(bbox_pred, targets[fg_masks])
loss_bbox = loss_bbox.sum(dim=1)
# loss for classification
labels = labels * valid_masks
loss_cls = self.loss_cls(cls_score, labels)
loss_cls[fg_masks] = loss_cls[fg_masks] + loss_bbox
loss = loss_cls.reshape(-1, 2).sum(dim=1)
return loss.reshape(-1, 1)
def _predict_by_feat_single(
self,
roi: Tensor,
cls_score: Tensor,
bbox_pred: Tensor,
img_meta: dict,
rescale: bool = False,
rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox results.
Args:
roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
cls_score (Tensor): Box scores, has shape
(num_boxes, num_classes + 1).
bbox_pred (Tensor): Box energies / deltas. has shape
(num_boxes, num_classes * 4).
img_meta (dict): image information.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
Defaults to None
Returns:
:obj:`InstanceData`: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
cls_score = cls_score.reshape(-1, self.num_classes + 1)
bbox_pred = bbox_pred.reshape(-1, 4)
roi = roi.repeat_interleave(self.num_instance, dim=0)
results = InstanceData()
if roi.shape[0] == 0:
return empty_instances([img_meta],
roi.device,
task_type='bbox',
instance_results=[results])[0]
scores = cls_score.softmax(dim=-1) if cls_score is not None else None
img_shape = img_meta['img_shape']
bboxes = self.bbox_coder.decode(
roi[..., 1:], bbox_pred, max_shape=img_shape)
if rescale and bboxes.size(0) > 0:
assert img_meta.get('scale_factor') is not None
scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(
(1, 2))
bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(
bboxes.size()[0], -1)
if rcnn_test_cfg is None:
# This means that it is aug test.
# It needs to return the raw results without nms.
results.bboxes = bboxes
results.scores = scores
else:
roi_idx = np.tile(
np.arange(bboxes.shape[0] / self.num_instance)[:, None],
(1, self.num_instance)).reshape(-1, 1)[:, 0]
roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape(
-1, 1)
bboxes = torch.cat([bboxes, roi_idx], dim=1)
det_bboxes, det_scores = self.set_nms(
bboxes, scores[:, 1], rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img)
results.bboxes = det_bboxes[:, :-1]
results.scores = det_scores
results.labels = torch.zeros_like(det_scores)
return results
@staticmethod
def set_nms(bboxes: Tensor,
scores: Tensor,
score_thr: float,
iou_threshold: float,
max_num: int = -1) -> Tuple[Tensor, Tensor]:
"""NMS for multi-instance prediction. Please refer to
https://github.com/Purkialo/CrowdDet for more details.
Args:
bboxes (Tensor): predict bboxes.
scores (Tensor): The score of each predict bbox.
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
iou_threshold (float): IoU threshold to be considered as
conflicted.
max_num (int, optional): if there are more than max_num bboxes
after NMS, only top max_num will be kept. Default to -1.
Returns:
Tuple[Tensor, Tensor]: (bboxes, scores).
"""
bboxes = bboxes[scores > score_thr]
scores = scores[scores > score_thr]
ordered_scores, order = scores.sort(descending=True)
ordered_bboxes = bboxes[order]
roi_idx = ordered_bboxes[:, -1]
keep = torch.ones(len(ordered_bboxes)) == 1
ruler = torch.arange(len(ordered_bboxes))
while ruler.shape[0] > 0:
basement = ruler[0]
ruler = ruler[1:]
idx = roi_idx[basement]
# calculate the body overlap
basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4)
ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)
overlap = bbox_overlaps(basement_bbox, ruler_bbox)
indices = torch.where(overlap > iou_threshold)[1]
loc = torch.where(roi_idx[ruler][indices] == idx)
# the mask won't change in the step
mask = keep[ruler[indices][loc]]
keep[ruler[indices]] = False
keep[ruler[indices][loc][mask]] = True
ruler[~keep[ruler]] = -1
ruler = ruler[ruler > 0]
keep = keep[order.sort()[1]]
return bboxes[keep][:max_num, :], scores[keep][:max_num]
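# --- Editor's note: illustrative sketch, not part of the upstream file. ---
# The EMD loss above evaluates both possible matchings between the two
# predicted instances of a proposal and its two targets, then keeps the
# cheaper matching per proposal. The toy helper below (hypothetical name,
# made-up per-matching losses) isolates that min-over-orderings step.
def _demo_emd_min_over_matchings():
    import torch

    # per-proposal loss of matching (pred0 -> gt0, pred1 -> gt1) ...
    loss_0 = torch.tensor([[1.5], [0.3], [2.0]])
    # ... and of the swapped matching (pred0 -> gt1, pred1 -> gt0)
    loss_1 = torch.tensor([[0.9], [0.7], [2.5]])
    loss = torch.cat([loss_0, loss_1], dim=1)
    _, min_indices = loss.min(dim=1)
    loss_emd = loss[torch.arange(loss.shape[0]), min_indices]
    assert torch.allclose(loss_emd, torch.tensor([0.9, 0.3, 2.0]))
    return loss_emd.mean()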
# ===== ERD-main/mmdet/models/roi_heads/bbox_heads/bbox_head.py =====
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.config import ConfigDict
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import Tensor
from torch.nn.modules.utils import _pair
from mmdet.models.layers import multiclass_nms
from mmdet.models.losses import accuracy
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.models.utils import empty_instances, multi_apply
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures.bbox import get_box_tensor, scale_boxes
from mmdet.utils import ConfigType, InstanceList, OptMultiConfig
@MODELS.register_module()
class BBoxHead(BaseModule):
"""Simplest RoI head, with only two fc layers for classification and
regression respectively."""
def __init__(self,
with_avg_pool: bool = False,
with_cls: bool = True,
with_reg: bool = True,
roi_feat_size: int = 7,
in_channels: int = 256,
num_classes: int = 80,
bbox_coder: ConfigType = dict(
type='DeltaXYWHBBoxCoder',
clip_border=True,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
predict_box_type: str = 'hbox',
reg_class_agnostic: bool = False,
reg_decoded_bbox: bool = False,
reg_predictor_cfg: ConfigType = dict(type='Linear'),
cls_predictor_cfg: ConfigType = dict(type='Linear'),
loss_cls: ConfigType = dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox: ConfigType = dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
assert with_cls or with_reg
self.with_avg_pool = with_avg_pool
self.with_cls = with_cls
self.with_reg = with_reg
self.roi_feat_size = _pair(roi_feat_size)
self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
self.in_channels = in_channels
self.num_classes = num_classes
self.predict_box_type = predict_box_type
self.reg_class_agnostic = reg_class_agnostic
self.reg_decoded_bbox = reg_decoded_bbox
self.reg_predictor_cfg = reg_predictor_cfg
self.cls_predictor_cfg = cls_predictor_cfg
self.bbox_coder = TASK_UTILS.build(bbox_coder)
self.loss_cls = MODELS.build(loss_cls)
self.loss_bbox = MODELS.build(loss_bbox)
in_channels = self.in_channels
if self.with_avg_pool:
self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
else:
in_channels *= self.roi_feat_area
if self.with_cls:
# need to add background class
if self.custom_cls_channels:
cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
else:
cls_channels = num_classes + 1
cls_predictor_cfg_ = self.cls_predictor_cfg.copy()
cls_predictor_cfg_.update(
in_features=in_channels, out_features=cls_channels)
self.fc_cls = MODELS.build(cls_predictor_cfg_)
if self.with_reg:
box_dim = self.bbox_coder.encode_size
out_dim_reg = box_dim if reg_class_agnostic else \
box_dim * num_classes
reg_predictor_cfg_ = self.reg_predictor_cfg.copy()
if isinstance(reg_predictor_cfg_, (dict, ConfigDict)):
reg_predictor_cfg_.update(
in_features=in_channels, out_features=out_dim_reg)
self.fc_reg = MODELS.build(reg_predictor_cfg_)
self.debug_imgs = None
if init_cfg is None:
self.init_cfg = []
if self.with_cls:
self.init_cfg += [
dict(
type='Normal', std=0.01, override=dict(name='fc_cls'))
]
if self.with_reg:
self.init_cfg += [
dict(
type='Normal', std=0.001, override=dict(name='fc_reg'))
]
    # TODO: Create a SeesawBBoxHead to simplify the logic in BBoxHead
@property
def custom_cls_channels(self) -> bool:
"""get custom_cls_channels from loss_cls."""
return getattr(self.loss_cls, 'custom_cls_channels', False)
    # TODO: Create a SeesawBBoxHead to simplify the logic in BBoxHead
@property
def custom_activation(self) -> bool:
"""get custom_activation from loss_cls."""
return getattr(self.loss_cls, 'custom_activation', False)
    # TODO: Create a SeesawBBoxHead to simplify the logic in BBoxHead
@property
def custom_accuracy(self) -> bool:
"""get custom_accuracy from loss_cls."""
return getattr(self.loss_cls, 'custom_accuracy', False)
def forward(self, x: Tuple[Tensor]) -> tuple:
"""Forward features from the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: A tuple of classification scores and bbox prediction.
- cls_score (Tensor): Classification scores for all
scale levels, each is a 4D-tensor, the channels number
is num_base_priors * num_classes.
- bbox_pred (Tensor): Box energies / deltas for all
scale levels, each is a 4D-tensor, the channels number
is num_base_priors * 4.
"""
if self.with_avg_pool:
if x.numel() > 0:
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
else:
                # avg_pool does not support empty tensors,
                # so use torch.mean instead
x = torch.mean(x, dim=(-1, -2))
cls_score = self.fc_cls(x) if self.with_cls else None
bbox_pred = self.fc_reg(x) if self.with_reg else None
return cls_score, bbox_pred
def _get_targets_single(self, pos_priors: Tensor, neg_priors: Tensor,
pos_gt_bboxes: Tensor, pos_gt_labels: Tensor,
cfg: ConfigDict) -> tuple:
"""Calculate the ground truth for proposals in the single image
according to the sampling results.
Args:
pos_priors (Tensor): Contains all the positive boxes,
has shape (num_pos, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
neg_priors (Tensor): Contains all the negative boxes,
has shape (num_neg, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
pos_gt_bboxes (Tensor): Contains gt_boxes for
all positive samples, has shape (num_pos, 4),
the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
pos_gt_labels (Tensor): Contains gt_labels for
all positive samples, has shape (num_pos, ).
cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
Returns:
Tuple[Tensor]: Ground truth for proposals
in a single image. Containing the following Tensors:
- labels(Tensor): Gt_labels for all proposals, has
shape (num_proposals,).
- label_weights(Tensor): Labels_weights for all
proposals, has shape (num_proposals,).
- bbox_targets(Tensor):Regression target for all
proposals, has shape (num_proposals, 4), the
last dimension 4 represents [tl_x, tl_y, br_x, br_y].
- bbox_weights(Tensor):Regression weights for all
proposals, has shape (num_proposals, 4).
"""
num_pos = pos_priors.size(0)
num_neg = neg_priors.size(0)
num_samples = num_pos + num_neg
# original implementation uses new_zeros since BG are set to be 0
# now use empty & fill because BG cat_id = num_classes,
# FG cat_id = [0, num_classes-1]
labels = pos_priors.new_full((num_samples, ),
self.num_classes,
dtype=torch.long)
reg_dim = pos_gt_bboxes.size(-1) if self.reg_decoded_bbox \
else self.bbox_coder.encode_size
label_weights = pos_priors.new_zeros(num_samples)
bbox_targets = pos_priors.new_zeros(num_samples, reg_dim)
bbox_weights = pos_priors.new_zeros(num_samples, reg_dim)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
pos_priors, pos_gt_bboxes)
else:
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
# is applied directly on the decoded bounding boxes, both
# the predicted boxes and regression targets should be with
# absolute coordinate format.
pos_bbox_targets = get_box_tensor(pos_gt_bboxes)
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return labels, label_weights, bbox_targets, bbox_weights
def get_targets(self,
sampling_results: List[SamplingResult],
rcnn_train_cfg: ConfigDict,
concat: bool = True) -> tuple:
"""Calculate the ground truth for all samples in a batch according to
the sampling_results.
        This calls ``_get_targets_single`` on the sampling results of each
        image via ``multi_apply`` and optionally concatenates the per-image
        targets afterwards.
Args:
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
concat (bool): Whether to concatenate the results of all
the images in a single batch.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following list of Tensors:
- labels (list[Tensor],Tensor): Gt_labels for all
proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- label_weights (list[Tensor]): Labels_weights for
all proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- bbox_targets (list[Tensor],Tensor): Regression target
for all proposals in a batch, each tensor in list
has shape (num_proposals, 4) when `concat=False`,
otherwise just a single tensor has shape
(num_all_proposals, 4), the last dimension 4 represents
[tl_x, tl_y, br_x, br_y].
- bbox_weights (list[tensor],Tensor): Regression weights for
all proposals in a batch, each tensor in list has shape
(num_proposals, 4) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals, 4).
"""
pos_priors_list = [res.pos_priors for res in sampling_results]
neg_priors_list = [res.neg_priors for res in sampling_results]
pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
self._get_targets_single,
pos_priors_list,
neg_priors_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
def loss_and_target(self,
cls_score: Tensor,
bbox_pred: Tensor,
rois: Tensor,
sampling_results: List[SamplingResult],
rcnn_train_cfg: ConfigDict,
concat: bool = True,
reduction_override: Optional[str] = None) -> dict:
"""Calculate the loss based on the features extracted by the bbox head.
Args:
cls_score (Tensor): Classification prediction
results of all class, has shape
(batch_size * num_proposals_single_image, num_classes)
bbox_pred (Tensor): Regression prediction results,
has shape
(batch_size * num_proposals_single_image, 4), the last
dimension 4 represents [tl_x, tl_y, br_x, br_y].
rois (Tensor): RoIs with the shape
(batch_size * num_proposals_single_image, 5) where the first
column indicates batch id of each RoI.
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
concat (bool): Whether to concatenate the results of all
the images in a single batch. Defaults to True.
reduction_override (str, optional): The reduction
method used to override the original reduction
method of the loss. Options are "none",
"mean" and "sum". Defaults to None,
Returns:
dict: A dictionary of loss and targets components.
The targets are only used for cascade rcnn.
"""
cls_reg_targets = self.get_targets(
sampling_results, rcnn_train_cfg, concat=concat)
losses = self.loss(
cls_score,
bbox_pred,
rois,
*cls_reg_targets,
reduction_override=reduction_override)
# cls_reg_targets is only for cascade rcnn
return dict(loss_bbox=losses, bbox_targets=cls_reg_targets)
def loss(self,
cls_score: Tensor,
bbox_pred: Tensor,
rois: Tensor,
labels: Tensor,
label_weights: Tensor,
bbox_targets: Tensor,
bbox_weights: Tensor,
reduction_override: Optional[str] = None) -> dict:
"""Calculate the loss based on the network predictions and targets.
Args:
cls_score (Tensor): Classification prediction
results of all class, has shape
(batch_size * num_proposals_single_image, num_classes)
bbox_pred (Tensor): Regression prediction results,
has shape
(batch_size * num_proposals_single_image, 4), the last
dimension 4 represents [tl_x, tl_y, br_x, br_y].
rois (Tensor): RoIs with the shape
(batch_size * num_proposals_single_image, 5) where the first
column indicates batch id of each RoI.
labels (Tensor): Gt_labels for all proposals in a batch, has
shape (batch_size * num_proposals_single_image, ).
label_weights (Tensor): Labels_weights for all proposals in a
batch, has shape (batch_size * num_proposals_single_image, ).
bbox_targets (Tensor): Regression target for all proposals in a
batch, has shape (batch_size * num_proposals_single_image, 4),
the last dimension 4 represents [tl_x, tl_y, br_x, br_y].
bbox_weights (Tensor): Regression weights for all proposals in a
batch, has shape (batch_size * num_proposals_single_image, 4).
reduction_override (str, optional): The reduction
method used to override the original reduction
method of the loss. Options are "none",
"mean" and "sum". Defaults to None,
Returns:
dict: A dictionary of loss.
"""
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
if cls_score.numel() > 0:
loss_cls_ = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
if isinstance(loss_cls_, dict):
losses.update(loss_cls_)
else:
losses['loss_cls'] = loss_cls_
if self.custom_activation:
acc_ = self.loss_cls.get_accuracy(cls_score, labels)
losses.update(acc_)
else:
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
bg_class_ind = self.num_classes
# 0~self.num_classes-1 are FG, self.num_classes is BG
pos_inds = (labels >= 0) & (labels < bg_class_ind)
# do not perform bounding box regression for BG anymore.
if pos_inds.any():
if self.reg_decoded_bbox:
# When the regression loss (e.g. `IouLoss`,
# `GIouLoss`, `DIouLoss`) is applied directly on
# the decoded bounding boxes, it decodes the
# already encoded coordinates to absolute format.
bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
bbox_pred = get_box_tensor(bbox_pred)
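                # Select the regression output that belongs to each positive
                # sample: a class-agnostic head predicts a single box per RoI,
                # while a class-specific head predicts one box per class, so
                # the deltas of the assigned GT class are gathered below
                # (e.g. 80 classes x 4 deltas -> the 4 channels of the label).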
if self.reg_class_agnostic:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), -1)[pos_inds.type(torch.bool)]
else:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), self.num_classes,
-1)[pos_inds.type(torch.bool),
labels[pos_inds.type(torch.bool)]]
losses['loss_bbox'] = self.loss_bbox(
pos_bbox_pred,
bbox_targets[pos_inds.type(torch.bool)],
bbox_weights[pos_inds.type(torch.bool)],
avg_factor=bbox_targets.size(0),
reduction_override=reduction_override)
else:
losses['loss_bbox'] = bbox_pred[pos_inds].sum()
return losses
def predict_by_feat(self,
rois: Tuple[Tensor],
cls_scores: Tuple[Tensor],
bbox_preds: Tuple[Tensor],
batch_img_metas: List[dict],
rcnn_test_cfg: Optional[ConfigDict] = None,
rescale: bool = False) -> InstanceList:
"""Transform a batch of output features extracted from the head into
bbox results.
Args:
rois (tuple[Tensor]): Tuple of boxes to be transformed.
Each has shape (num_boxes, 5). last dimension 5 arrange as
(batch_index, x1, y1, x2, y2).
cls_scores (tuple[Tensor]): Tuple of box scores, each has shape
(num_boxes, num_classes + 1).
bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each
has shape (num_boxes, num_classes * 4).
batch_img_metas (list[dict]): List of image information.
rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Instance segmentation
results of each image after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
assert len(cls_scores) == len(bbox_preds)
result_list = []
for img_id in range(len(batch_img_metas)):
img_meta = batch_img_metas[img_id]
results = self._predict_by_feat_single(
roi=rois[img_id],
cls_score=cls_scores[img_id],
bbox_pred=bbox_preds[img_id],
img_meta=img_meta,
rescale=rescale,
rcnn_test_cfg=rcnn_test_cfg)
result_list.append(results)
return result_list
def _predict_by_feat_single(
self,
roi: Tensor,
cls_score: Tensor,
bbox_pred: Tensor,
img_meta: dict,
rescale: bool = False,
rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox results.
Args:
roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
cls_score (Tensor): Box scores, has shape
(num_boxes, num_classes + 1).
bbox_pred (Tensor): Box energies / deltas.
has shape (num_boxes, num_classes * 4).
img_meta (dict): image information.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
Defaults to None
Returns:
:obj:`InstanceData`: Detection results of each image\
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
results = InstanceData()
if roi.shape[0] == 0:
return empty_instances([img_meta],
roi.device,
task_type='bbox',
instance_results=[results],
box_type=self.predict_box_type,
use_box_type=False,
num_classes=self.num_classes,
score_per_cls=rcnn_test_cfg is None)[0]
# some loss (Seesaw loss..) may have custom activation
if self.custom_cls_channels:
scores = self.loss_cls.get_activation(cls_score)
else:
scores = F.softmax(
cls_score, dim=-1) if cls_score is not None else None
img_shape = img_meta['img_shape']
num_rois = roi.size(0)
# bbox_pred would be None in some detector when with_reg is False,
# e.g. Grid R-CNN.
if bbox_pred is not None:
num_classes = 1 if self.reg_class_agnostic else self.num_classes
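            # Repeat each RoI once per class so that, after reshaping the
            # deltas to (num_rois * num_classes, encode_size), the i-th delta
            # row is decoded against its own copy of the proposal box.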
roi = roi.repeat_interleave(num_classes, dim=0)
bbox_pred = bbox_pred.view(-1, self.bbox_coder.encode_size)
bboxes = self.bbox_coder.decode(
roi[..., 1:], bbox_pred, max_shape=img_shape)
else:
bboxes = roi[:, 1:].clone()
if img_shape is not None and bboxes.size(-1) == 4:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])
if rescale and bboxes.size(0) > 0:
assert img_meta.get('scale_factor') is not None
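            # `scale_factor` maps the original image to the resized network
            # input, so its reciprocal rescales the predicted boxes back to
            # the original image space.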
scale_factor = [1 / s for s in img_meta['scale_factor']]
bboxes = scale_boxes(bboxes, scale_factor)
# Get the inside tensor when `bboxes` is a box type
bboxes = get_box_tensor(bboxes)
box_dim = bboxes.size(-1)
bboxes = bboxes.view(num_rois, -1)
if rcnn_test_cfg is None:
# This means that it is aug test.
# It needs to return the raw results without nms.
results.bboxes = bboxes
results.scores = scores
else:
det_bboxes, det_labels = multiclass_nms(
bboxes,
scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img,
box_dim=box_dim)
results.bboxes = det_bboxes[:, :-1]
results.scores = det_bboxes[:, -1]
results.labels = det_labels
return results
def refine_bboxes(self, sampling_results: Union[List[SamplingResult],
InstanceList],
bbox_results: dict,
batch_img_metas: List[dict]) -> InstanceList:
"""Refine bboxes during training.
Args:
sampling_results (List[:obj:`SamplingResult`] or
List[:obj:`InstanceData`]): Sampling results.
:obj:`SamplingResult` is the real sampling results
calculate from bbox_head, while :obj:`InstanceData` is
fake sampling results, e.g., in Sparse R-CNN or QueryInst, etc.
bbox_results (dict): Usually is a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `rois` (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
- `bbox_targets` (tuple): Ground truth for proposals in a
single image. Containing the following list of Tensors:
(labels, label_weights, bbox_targets, bbox_weights)
batch_img_metas (List[dict]): List of image information.
Returns:
list[:obj:`InstanceData`]: Refined bboxes of each image.
Example:
>>> # xdoctest: +REQUIRES(module:kwarray)
>>> import numpy as np
>>> from mmdet.models.task_modules.samplers.
... sampling_result import random_boxes
>>> from mmdet.models.task_modules.samplers import SamplingResult
>>> self = BBoxHead(reg_class_agnostic=True)
>>> n_roi = 2
>>> n_img = 4
>>> scale = 512
>>> rng = np.random.RandomState(0)
            >>> batch_img_metas = [{'img_shape': (scale, scale)}
            ...                    for _ in range(n_img)]
>>> sampling_results = [SamplingResult.random(rng=10)
... for _ in range(n_img)]
>>> # Create rois in the expected format
>>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
>>> img_ids = torch.randint(0, n_img, (n_roi,))
>>> img_ids = img_ids.float()
>>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
>>> # Create other args
>>> labels = torch.randint(0, 81, (scale,)).long()
>>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
>>> cls_score = torch.randn((scale, 81))
            >>> # For each image, pretend random positive boxes are gts
            >>> bbox_targets = (labels, None, None, None)
            >>> bbox_results = dict(rois=rois, bbox_pred=bbox_preds,
... cls_score=cls_score,
... bbox_targets=bbox_targets)
>>> bboxes_list = self.refine_bboxes(sampling_results,
... bbox_results,
... batch_img_metas)
>>> print(bboxes_list)
"""
pos_is_gts = [res.pos_is_gt for res in sampling_results]
# bbox_targets is a tuple
labels = bbox_results['bbox_targets'][0]
cls_scores = bbox_results['cls_score']
rois = bbox_results['rois']
bbox_preds = bbox_results['bbox_pred']
if self.custom_activation:
            # TODO: Create a SeesawBBoxHead to simplify the logic in BBoxHead
cls_scores = self.loss_cls.get_activation(cls_scores)
if cls_scores.numel() == 0:
return None
if cls_scores.shape[-1] == self.num_classes + 1:
# remove background class
cls_scores = cls_scores[:, :-1]
elif cls_scores.shape[-1] != self.num_classes:
raise ValueError('The last dim of `cls_scores` should equal to '
'`num_classes` or `num_classes + 1`,'
f'but got {cls_scores.shape[-1]}.')
labels = torch.where(labels == self.num_classes, cls_scores.argmax(1),
labels)
img_ids = rois[:, 0].long().unique(sorted=True)
assert img_ids.numel() <= len(batch_img_metas)
results_list = []
for i in range(len(batch_img_metas)):
inds = torch.nonzero(
rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
num_rois = inds.numel()
bboxes_ = rois[inds, 1:]
label_ = labels[inds]
bbox_pred_ = bbox_preds[inds]
img_meta_ = batch_img_metas[i]
pos_is_gts_ = pos_is_gts[i]
bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
img_meta_)
# filter gt bboxes
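            # `pos_is_gt` flags sampled positives that are ground-truth boxes
            # injected as proposals; they are excluded from the refined boxes
            # handed to the next stage.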
pos_keep = 1 - pos_is_gts_
keep_inds = pos_is_gts_.new_ones(num_rois)
keep_inds[:len(pos_is_gts_)] = pos_keep
results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)])
results_list.append(results)
return results_list
def regress_by_class(self, priors: Tensor, label: Tensor,
bbox_pred: Tensor, img_meta: dict) -> Tensor:
"""Regress the bbox for the predicted class. Used in Cascade R-CNN.
Args:
priors (Tensor): Priors from `rpn_head` or last stage
`bbox_head`, has shape (num_proposals, 4).
label (Tensor): Only used when `self.reg_class_agnostic`
is False, has shape (num_proposals, ).
bbox_pred (Tensor): Regression prediction of
current stage `bbox_head`. When `self.reg_class_agnostic`
is False, it has shape (n, num_classes * 4), otherwise
it has shape (n, 4).
img_meta (dict): Image meta info.
Returns:
Tensor: Regressed bboxes, the same shape as input rois.
"""
reg_dim = self.bbox_coder.encode_size
if not self.reg_class_agnostic:
label = label * reg_dim
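            # Build per-sample column indices into the class-specific output,
            # e.g. with reg_dim=4 a label of 2 gives [8, 9, 10, 11], i.e. the
            # four deltas predicted for class 2.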
inds = torch.stack([label + i for i in range(reg_dim)], 1)
bbox_pred = torch.gather(bbox_pred, 1, inds)
assert bbox_pred.size()[1] == reg_dim
max_shape = img_meta['img_shape']
regressed_bboxes = self.bbox_coder.decode(
priors, bbox_pred, max_shape=max_shape)
return regressed_bboxes
| 32,325 | 44.593794 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/bbox_heads/sabl_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Sequence, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.layers import multiclass_nms
from mmdet.models.losses import accuracy
from mmdet.models.task_modules import SamplingResult
from mmdet.models.utils import multi_apply
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig
from .bbox_head import BBoxHead
@MODELS.register_module()
class SABLHead(BBoxHead):
"""Side-Aware Boundary Localization (SABL) for RoI-Head.
Side-Aware features are extracted by conv layers
with an attention mechanism.
Boundary Localization with Bucketing and Bucketing Guided Rescoring
are implemented in BucketingBBoxCoder.
Please refer to https://arxiv.org/abs/1912.04260 for more details.
Args:
cls_in_channels (int): Input channels of cls RoI feature. \
Defaults to 256.
reg_in_channels (int): Input channels of reg RoI feature. \
Defaults to 256.
roi_feat_size (int): Size of RoI features. Defaults to 7.
reg_feat_up_ratio (int): Upsample ratio of reg features. \
Defaults to 2.
reg_pre_kernel (int): Kernel of 2D conv layers before \
attention pooling. Defaults to 3.
reg_post_kernel (int): Kernel of 1D conv layers after \
attention pooling. Defaults to 3.
reg_pre_num (int): Number of pre convs. Defaults to 2.
reg_post_num (int): Number of post convs. Defaults to 1.
num_classes (int): Number of classes in dataset. Defaults to 80.
cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.
reg_offset_out_channels (int): Hidden and output channel \
of reg offset branch. Defaults to 256.
reg_cls_out_channels (int): Hidden and output channel \
of reg cls branch. Defaults to 256.
num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.
num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0.
reg_class_agnostic (bool): Class agnostic regression or not. \
Defaults to True.
norm_cfg (dict): Config of norm layers. Defaults to None.
bbox_coder (dict): Config of bbox coder. Defaults 'BucketingBBoxCoder'.
loss_cls (dict): Config of classification loss.
loss_bbox_cls (dict): Config of classification loss for bbox branch.
loss_bbox_reg (dict): Config of regression loss for bbox branch.
init_cfg (dict or list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
num_classes: int,
cls_in_channels: int = 256,
reg_in_channels: int = 256,
roi_feat_size: int = 7,
reg_feat_up_ratio: int = 2,
reg_pre_kernel: int = 3,
reg_post_kernel: int = 3,
reg_pre_num: int = 2,
reg_post_num: int = 1,
cls_out_channels: int = 1024,
reg_offset_out_channels: int = 256,
reg_cls_out_channels: int = 256,
num_cls_fcs: int = 1,
num_reg_fcs: int = 0,
reg_class_agnostic: bool = True,
norm_cfg: OptConfigType = None,
bbox_coder: ConfigType = dict(
type='BucketingBBoxCoder',
num_buckets=14,
scale_factor=1.7),
loss_cls: ConfigType = dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox_cls: ConfigType = dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_bbox_reg: ConfigType = dict(
type='SmoothL1Loss', beta=0.1, loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super(BBoxHead, self).__init__(init_cfg=init_cfg)
self.cls_in_channels = cls_in_channels
self.reg_in_channels = reg_in_channels
self.roi_feat_size = roi_feat_size
self.reg_feat_up_ratio = int(reg_feat_up_ratio)
self.num_buckets = bbox_coder['num_buckets']
assert self.reg_feat_up_ratio // 2 >= 1
self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio
assert self.up_reg_feat_size == bbox_coder['num_buckets']
self.reg_pre_kernel = reg_pre_kernel
self.reg_post_kernel = reg_post_kernel
self.reg_pre_num = reg_pre_num
self.reg_post_num = reg_post_num
self.num_classes = num_classes
self.cls_out_channels = cls_out_channels
self.reg_offset_out_channels = reg_offset_out_channels
self.reg_cls_out_channels = reg_cls_out_channels
self.num_cls_fcs = num_cls_fcs
self.num_reg_fcs = num_reg_fcs
self.reg_class_agnostic = reg_class_agnostic
assert self.reg_class_agnostic
self.norm_cfg = norm_cfg
self.bbox_coder = TASK_UTILS.build(bbox_coder)
self.loss_cls = MODELS.build(loss_cls)
self.loss_bbox_cls = MODELS.build(loss_bbox_cls)
self.loss_bbox_reg = MODELS.build(loss_bbox_reg)
self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,
self.cls_in_channels,
self.roi_feat_size,
self.cls_out_channels)
self.side_num = int(np.ceil(self.num_buckets / 2))
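        # Each box side is described by `side_num` buckets; the transposed
        # convolutions below upsample the 1-D side-aware features so that
        # their length equals `num_buckets` (one position per bucket).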
if self.reg_feat_up_ratio > 1:
self.upsample_x = nn.ConvTranspose1d(
reg_in_channels,
reg_in_channels,
self.reg_feat_up_ratio,
stride=self.reg_feat_up_ratio)
self.upsample_y = nn.ConvTranspose1d(
reg_in_channels,
reg_in_channels,
self.reg_feat_up_ratio,
stride=self.reg_feat_up_ratio)
self.reg_pre_convs = nn.ModuleList()
for i in range(self.reg_pre_num):
reg_pre_conv = ConvModule(
reg_in_channels,
reg_in_channels,
kernel_size=reg_pre_kernel,
padding=reg_pre_kernel // 2,
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'))
self.reg_pre_convs.append(reg_pre_conv)
self.reg_post_conv_xs = nn.ModuleList()
for i in range(self.reg_post_num):
reg_post_conv_x = ConvModule(
reg_in_channels,
reg_in_channels,
kernel_size=(1, reg_post_kernel),
padding=(0, reg_post_kernel // 2),
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'))
self.reg_post_conv_xs.append(reg_post_conv_x)
self.reg_post_conv_ys = nn.ModuleList()
for i in range(self.reg_post_num):
reg_post_conv_y = ConvModule(
reg_in_channels,
reg_in_channels,
kernel_size=(reg_post_kernel, 1),
padding=(reg_post_kernel // 2, 0),
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'))
self.reg_post_conv_ys.append(reg_post_conv_y)
self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)
self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)
self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)
self.relu = nn.ReLU(inplace=True)
self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,
self.reg_in_channels, 1,
self.reg_cls_out_channels)
self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,
self.reg_in_channels, 1,
self.reg_offset_out_channels)
self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)
self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)
if init_cfg is None:
self.init_cfg = [
dict(
type='Xavier',
layer='Linear',
distribution='uniform',
override=[
dict(type='Normal', name='reg_conv_att_x', std=0.01),
dict(type='Normal', name='reg_conv_att_y', std=0.01),
dict(type='Normal', name='fc_reg_cls', std=0.01),
dict(type='Normal', name='fc_cls', std=0.01),
dict(type='Normal', name='fc_reg_offset', std=0.001)
])
]
if self.reg_feat_up_ratio > 1:
self.init_cfg += [
dict(
type='Kaiming',
distribution='normal',
override=[
dict(name='upsample_x'),
dict(name='upsample_y')
])
]
def _add_fc_branch(self, num_branch_fcs: int, in_channels: int,
roi_feat_size: int,
fc_out_channels: int) -> nn.ModuleList:
"""build fc layers."""
in_channels = in_channels * roi_feat_size * roi_feat_size
branch_fcs = nn.ModuleList()
for i in range(num_branch_fcs):
fc_in_channels = (in_channels if i == 0 else fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))
return branch_fcs
def cls_forward(self, cls_x: Tensor) -> Tensor:
"""forward of classification fc layers."""
cls_x = cls_x.view(cls_x.size(0), -1)
for fc in self.cls_fcs:
cls_x = self.relu(fc(cls_x))
cls_score = self.fc_cls(cls_x)
return cls_score
def attention_pool(self, reg_x: Tensor) -> tuple:
"""Extract direction-specific features fx and fy with attention
        mechanism."""
reg_fx = reg_x
reg_fy = reg_x
reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()
reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()
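        # Normalize each single-channel attention map along one spatial axis
        # and use it to collapse that axis: summing over the height yields
        # x-direction features of shape (N, C, W), summing over the width
        # yields y-direction features of shape (N, C, H).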
reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)
reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)
reg_fx = (reg_fx * reg_fx_att).sum(dim=2)
reg_fy = (reg_fy * reg_fy_att).sum(dim=3)
return reg_fx, reg_fy
def side_aware_feature_extractor(self, reg_x: Tensor) -> tuple:
"""Refine and extract side-aware features without split them."""
for reg_pre_conv in self.reg_pre_convs:
reg_x = reg_pre_conv(reg_x)
reg_fx, reg_fy = self.attention_pool(reg_x)
if self.reg_post_num > 0:
reg_fx = reg_fx.unsqueeze(2)
reg_fy = reg_fy.unsqueeze(3)
for i in range(self.reg_post_num):
reg_fx = self.reg_post_conv_xs[i](reg_fx)
reg_fy = self.reg_post_conv_ys[i](reg_fy)
reg_fx = reg_fx.squeeze(2)
reg_fy = reg_fy.squeeze(3)
if self.reg_feat_up_ratio > 1:
reg_fx = self.relu(self.upsample_x(reg_fx))
reg_fy = self.relu(self.upsample_y(reg_fy))
reg_fx = torch.transpose(reg_fx, 1, 2)
reg_fy = torch.transpose(reg_fy, 1, 2)
return reg_fx.contiguous(), reg_fy.contiguous()
def reg_pred(self, x: Tensor, offset_fcs: nn.ModuleList,
cls_fcs: nn.ModuleList) -> tuple:
"""Predict bucketing estimation (cls_pred) and fine regression (offset
pred) with side-aware features."""
x_offset = x.view(-1, self.reg_in_channels)
x_cls = x.view(-1, self.reg_in_channels)
for fc in offset_fcs:
x_offset = self.relu(fc(x_offset))
for fc in cls_fcs:
x_cls = self.relu(fc(x_cls))
offset_pred = self.fc_reg_offset(x_offset)
cls_pred = self.fc_reg_cls(x_cls)
offset_pred = offset_pred.view(x.size(0), -1)
cls_pred = cls_pred.view(x.size(0), -1)
return offset_pred, cls_pred
def side_aware_split(self, feat: Tensor) -> Tensor:
"""Split side-aware features aligned with orders of bucketing
targets."""
l_end = int(np.ceil(self.up_reg_feat_size / 2))
r_start = int(np.floor(self.up_reg_feat_size / 2))
feat_fl = feat[:, :l_end]
feat_fr = feat[:, r_start:].flip(dims=(1, ))
feat_fl = feat_fl.contiguous()
feat_fr = feat_fr.contiguous()
feat = torch.cat([feat_fl, feat_fr], dim=-1)
return feat
def bbox_pred_split(self, bbox_pred: tuple,
num_proposals_per_img: Sequence[int]) -> tuple:
"""Split batch bbox prediction back to each image."""
bucket_cls_preds, bucket_offset_preds = bbox_pred
bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)
bucket_offset_preds = bucket_offset_preds.split(
num_proposals_per_img, 0)
bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))
return bbox_pred
def reg_forward(self, reg_x: Tensor) -> tuple:
"""forward of regression branch."""
outs = self.side_aware_feature_extractor(reg_x)
edge_offset_preds = []
edge_cls_preds = []
reg_fx = outs[0]
reg_fy = outs[1]
offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,
self.reg_cls_fcs)
offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,
self.reg_cls_fcs)
offset_pred_x = self.side_aware_split(offset_pred_x)
offset_pred_y = self.side_aware_split(offset_pred_y)
cls_pred_x = self.side_aware_split(cls_pred_x)
cls_pred_y = self.side_aware_split(cls_pred_y)
edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)
edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)
return edge_cls_preds, edge_offset_preds
def forward(self, x: Tensor) -> tuple:
"""Forward features from the upstream network."""
bbox_pred = self.reg_forward(x)
cls_score = self.cls_forward(x)
return cls_score, bbox_pred
def get_targets(self,
sampling_results: List[SamplingResult],
rcnn_train_cfg: ConfigDict,
concat: bool = True) -> tuple:
"""Calculate the ground truth for all samples in a batch according to
the sampling_results."""
pos_proposals = [res.pos_bboxes for res in sampling_results]
neg_proposals = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
cls_reg_targets = self.bucket_target(
pos_proposals,
neg_proposals,
pos_gt_bboxes,
pos_gt_labels,
rcnn_train_cfg,
concat=concat)
(labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights) = cls_reg_targets
return (labels, label_weights, (bucket_cls_targets,
bucket_offset_targets),
(bucket_cls_weights, bucket_offset_weights))
def bucket_target(self,
pos_proposals_list: list,
neg_proposals_list: list,
pos_gt_bboxes_list: list,
pos_gt_labels_list: list,
rcnn_train_cfg: ConfigDict,
concat: bool = True) -> tuple:
"""Compute bucketing estimation targets and fine regression targets for
a batch of images."""
(labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights) = multi_apply(
self._bucket_target_single,
pos_proposals_list,
neg_proposals_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bucket_cls_targets = torch.cat(bucket_cls_targets, 0)
bucket_cls_weights = torch.cat(bucket_cls_weights, 0)
bucket_offset_targets = torch.cat(bucket_offset_targets, 0)
bucket_offset_weights = torch.cat(bucket_offset_weights, 0)
return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights)
def _bucket_target_single(self, pos_proposals: Tensor,
neg_proposals: Tensor, pos_gt_bboxes: Tensor,
pos_gt_labels: Tensor, cfg: ConfigDict) -> tuple:
"""Compute bucketing estimation targets and fine regression targets for
a single image.
Args:
pos_proposals (Tensor): positive proposals of a single image,
Shape (n_pos, 4)
neg_proposals (Tensor): negative proposals of a single image,
Shape (n_neg, 4).
pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals
of a single image, Shape (n_pos, 4).
pos_gt_labels (Tensor): gt labels assigned to positive proposals
of a single image, Shape (n_pos, ).
cfg (dict): Config of calculating targets
Returns:
tuple:
- labels (Tensor): Labels in a single image. Shape (n,).
- label_weights (Tensor): Label weights in a single image.
Shape (n,)
- bucket_cls_targets (Tensor): Bucket cls targets in
a single image. Shape (n, num_buckets*2).
- bucket_cls_weights (Tensor): Bucket cls weights in
a single image. Shape (n, num_buckets*2).
- bucket_offset_targets (Tensor): Bucket offset targets
in a single image. Shape (n, num_buckets*2).
            - bucket_offset_weights (Tensor): Bucket offset weights
in a single image. Shape (n, num_buckets*2).
"""
num_pos = pos_proposals.size(0)
num_neg = neg_proposals.size(0)
num_samples = num_pos + num_neg
labels = pos_gt_bboxes.new_full((num_samples, ),
self.num_classes,
dtype=torch.long)
label_weights = pos_proposals.new_zeros(num_samples)
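        # A box has 4 sides with `side_num` buckets per side, hence
        # 4 * side_num bucket-classification and offset targets per proposal.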
bucket_cls_targets = pos_proposals.new_zeros(num_samples,
4 * self.side_num)
bucket_cls_weights = pos_proposals.new_zeros(num_samples,
4 * self.side_num)
bucket_offset_targets = pos_proposals.new_zeros(
num_samples, 4 * self.side_num)
bucket_offset_weights = pos_proposals.new_zeros(
num_samples, 4 * self.side_num)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
label_weights[:num_pos] = 1.0
(pos_bucket_offset_targets, pos_bucket_offset_weights,
pos_bucket_cls_targets,
pos_bucket_cls_weights) = self.bbox_coder.encode(
pos_proposals, pos_gt_bboxes)
bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets
bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights
bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets
bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights)
def loss(self,
cls_score: Tensor,
bbox_pred: Tuple[Tensor, Tensor],
rois: Tensor,
labels: Tensor,
label_weights: Tensor,
bbox_targets: Tuple[Tensor, Tensor],
bbox_weights: Tuple[Tensor, Tensor],
reduction_override: Optional[str] = None) -> dict:
"""Calculate the loss based on the network predictions and targets.
Args:
cls_score (Tensor): Classification prediction
                results for all classes, has shape
(batch_size * num_proposals_single_image, num_classes)
            bbox_pred (Tuple[Tensor, Tensor]): A tuple of regression prediction
                results containing `bucket_cls_preds` and `bucket_offset_preds`.
rois (Tensor): RoIs with the shape
(batch_size * num_proposals_single_image, 5) where the first
column indicates batch id of each RoI.
labels (Tensor): Gt_labels for all proposals in a batch, has
shape (batch_size * num_proposals_single_image, ).
label_weights (Tensor): Labels_weights for all proposals in a
batch, has shape (batch_size * num_proposals_single_image, ).
bbox_targets (Tuple[Tensor, Tensor]): A tuple of regression target
containing `bucket_cls_targets` and `bucket_offset_targets`.
the last dimension 4 represents [tl_x, tl_y, br_x, br_y].
bbox_weights (Tuple[Tensor, Tensor]): A tuple of regression
weights containing `bucket_cls_weights` and
`bucket_offset_weights`.
reduction_override (str, optional): The reduction
method used to override the original reduction
method of the loss. Options are "none",
"mean" and "sum". Defaults to None,
Returns:
dict: A dictionary of loss.
"""
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
bucket_cls_preds, bucket_offset_preds = bbox_pred
bucket_cls_targets, bucket_offset_targets = bbox_targets
bucket_cls_weights, bucket_offset_weights = bbox_weights
# edge cls
bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)
bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)
bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)
losses['loss_bbox_cls'] = self.loss_bbox_cls(
bucket_cls_preds,
bucket_cls_targets,
bucket_cls_weights,
avg_factor=bucket_cls_targets.size(0),
reduction_override=reduction_override)
losses['loss_bbox_reg'] = self.loss_bbox_reg(
bucket_offset_preds,
bucket_offset_targets,
bucket_offset_weights,
avg_factor=bucket_offset_targets.size(0),
reduction_override=reduction_override)
return losses
def _predict_by_feat_single(
self,
roi: Tensor,
cls_score: Tensor,
bbox_pred: Tuple[Tensor, Tensor],
img_meta: dict,
rescale: bool = False,
rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox results.
Args:
roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
cls_score (Tensor): Box scores, has shape
(num_boxes, num_classes + 1).
bbox_pred (Tuple[Tensor, Tensor]): Box cls preds and offset preds.
img_meta (dict): image information.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
Defaults to None
Returns:
:obj:`InstanceData`: Detection results of each image
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
results = InstanceData()
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
img_shape = img_meta['img_shape']
if bbox_pred is not None:
bboxes, confidences = self.bbox_coder.decode(
roi[:, 1:], bbox_pred, img_shape)
else:
bboxes = roi[:, 1:].clone()
confidences = None
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
if rescale and bboxes.size(0) > 0:
assert img_meta.get('scale_factor') is not None
scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(
(1, 2))
bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(
bboxes.size()[0], -1)
if rcnn_test_cfg is None:
results.bboxes = bboxes
results.scores = scores
else:
det_bboxes, det_labels = multiclass_nms(
bboxes,
scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img,
score_factors=confidences)
results.bboxes = det_bboxes[:, :4]
results.scores = det_bboxes[:, -1]
results.labels = det_labels
return results
def refine_bboxes(self, sampling_results: List[SamplingResult],
bbox_results: dict,
batch_img_metas: List[dict]) -> InstanceList:
"""Refine bboxes during training.
Args:
sampling_results (List[:obj:`SamplingResult`]): Sampling results.
bbox_results (dict): Usually is a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `rois` (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
- `bbox_targets` (tuple): Ground truth for proposals in a
single image. Containing the following list of Tensors:
(labels, label_weights, bbox_targets, bbox_weights)
batch_img_metas (List[dict]): List of image information.
Returns:
list[:obj:`InstanceData`]: Refined bboxes of each image.
"""
pos_is_gts = [res.pos_is_gt for res in sampling_results]
# bbox_targets is a tuple
labels = bbox_results['bbox_targets'][0]
cls_scores = bbox_results['cls_score']
rois = bbox_results['rois']
bbox_preds = bbox_results['bbox_pred']
if cls_scores.numel() == 0:
return None
labels = torch.where(labels == self.num_classes,
cls_scores[:, :-1].argmax(1), labels)
img_ids = rois[:, 0].long().unique(sorted=True)
assert img_ids.numel() <= len(batch_img_metas)
results_list = []
for i in range(len(batch_img_metas)):
inds = torch.nonzero(
rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
num_rois = inds.numel()
bboxes_ = rois[inds, 1:]
label_ = labels[inds]
edge_cls_preds, edge_offset_preds = bbox_preds
edge_cls_preds_ = edge_cls_preds[inds]
edge_offset_preds_ = edge_offset_preds[inds]
bbox_pred_ = (edge_cls_preds_, edge_offset_preds_)
img_meta_ = batch_img_metas[i]
pos_is_gts_ = pos_is_gts[i]
bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
img_meta_)
# filter gt bboxes
pos_keep = 1 - pos_is_gts_
keep_inds = pos_is_gts_.new_ones(num_rois)
keep_inds[:len(pos_is_gts_)] = pos_keep
results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)])
results_list.append(results)
return results_list
def regress_by_class(self, rois: Tensor, label: Tensor, bbox_pred: tuple,
img_meta: dict) -> Tensor:
"""Regress the bbox for the predicted class. Used in Cascade R-CNN.
Args:
rois (Tensor): shape (n, 4) or (n, 5)
label (Tensor): shape (n, )
bbox_pred (Tuple[Tensor]): shape [(n, num_buckets *2), \
(n, num_buckets *2)]
img_meta (dict): Image meta info.
Returns:
Tensor: Regressed bboxes, the same shape as input rois.
"""
assert rois.size(1) == 4 or rois.size(1) == 5
if rois.size(1) == 4:
new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,
img_meta['img_shape'])
else:
bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
img_meta['img_shape'])
new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
return new_rois
| 30,500 | 43.527007 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/bbox_heads/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .multi_instance_bbox_head import MultiInstanceBBoxHead
from .sabl_head import SABLHead
from .scnet_bbox_head import SCNetBBoxHead
__all__ = [
'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
'SCNetBBoxHead', 'MultiInstanceBBoxHead'
]
| 609 | 37.125 | 76 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/bbox_heads/dii_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch
import torch.nn as nn
from mmcv.cnn import build_activation_layer, build_norm_layer
from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
from mmengine.config import ConfigDict
from mmengine.model import bias_init_with_prob
from torch import Tensor
from mmdet.models.losses import accuracy
from mmdet.models.task_modules import SamplingResult
from mmdet.models.utils import multi_apply
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, reduce_mean
from .bbox_head import BBoxHead
@MODELS.register_module()
class DIIHead(BBoxHead):
r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object
Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
Args:
num_classes (int): Number of class in dataset.
Defaults to 80.
num_ffn_fcs (int): The number of fully-connected
layers in FFNs. Defaults to 2.
        num_heads (int): The number of attention heads in MultiheadAttention.
            Defaults to 8.
num_cls_fcs (int): The number of fully-connected
layers in classification subnet. Defaults to 1.
num_reg_fcs (int): The number of fully-connected
layers in regression subnet. Defaults to 3.
feedforward_channels (int): The hidden dimension
of FFNs. Defaults to 2048
in_channels (int): Hidden_channels of MultiheadAttention.
Defaults to 256.
dropout (float): Probability of drop the channel.
Defaults to 0.0
ffn_act_cfg (:obj:`ConfigDict` or dict): The activation config
for FFNs.
dynamic_conv_cfg (:obj:`ConfigDict` or dict): The convolution
config for DynamicConv.
loss_iou (:obj:`ConfigDict` or dict): The config for iou or
giou loss.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict]): Initialization config dict. Defaults to None.
"""
def __init__(self,
num_classes: int = 80,
num_ffn_fcs: int = 2,
num_heads: int = 8,
num_cls_fcs: int = 1,
num_reg_fcs: int = 3,
feedforward_channels: int = 2048,
in_channels: int = 256,
dropout: float = 0.0,
ffn_act_cfg: ConfigType = dict(type='ReLU', inplace=True),
dynamic_conv_cfg: ConfigType = dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=7,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_iou: ConfigType = dict(type='GIoULoss', loss_weight=2.0),
init_cfg: OptConfigType = None,
**kwargs) -> None:
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super().__init__(
num_classes=num_classes,
reg_decoded_bbox=True,
reg_class_agnostic=True,
init_cfg=init_cfg,
**kwargs)
self.loss_iou = MODELS.build(loss_iou)
self.in_channels = in_channels
self.fp16_enabled = False
self.attention = MultiheadAttention(in_channels, num_heads, dropout)
self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
self.instance_interactive_conv = MODELS.build(dynamic_conv_cfg)
self.instance_interactive_conv_dropout = nn.Dropout(dropout)
self.instance_interactive_conv_norm = build_norm_layer(
dict(type='LN'), in_channels)[1]
self.ffn = FFN(
in_channels,
feedforward_channels,
num_ffn_fcs,
act_cfg=ffn_act_cfg,
dropout=dropout)
self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
self.cls_fcs = nn.ModuleList()
for _ in range(num_cls_fcs):
self.cls_fcs.append(
nn.Linear(in_channels, in_channels, bias=False))
self.cls_fcs.append(
build_norm_layer(dict(type='LN'), in_channels)[1])
self.cls_fcs.append(
build_activation_layer(dict(type='ReLU', inplace=True)))
# over load the self.fc_cls in BBoxHead
if self.loss_cls.use_sigmoid:
self.fc_cls = nn.Linear(in_channels, self.num_classes)
else:
self.fc_cls = nn.Linear(in_channels, self.num_classes + 1)
self.reg_fcs = nn.ModuleList()
for _ in range(num_reg_fcs):
self.reg_fcs.append(
nn.Linear(in_channels, in_channels, bias=False))
self.reg_fcs.append(
build_norm_layer(dict(type='LN'), in_channels)[1])
self.reg_fcs.append(
build_activation_layer(dict(type='ReLU', inplace=True)))
# over load the self.fc_cls in BBoxHead
self.fc_reg = nn.Linear(in_channels, 4)
        assert self.reg_class_agnostic, 'DIIHead only ' \
            'supports `reg_class_agnostic=True` '
        assert self.reg_decoded_bbox, 'DIIHead only ' \
            'supports `reg_decoded_bbox=True`'
def init_weights(self) -> None:
"""Use xavier initialization for all weight parameter and set
classification head bias as a specific value when use focal loss."""
super().init_weights()
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
else:
# adopt the default initialization for
# the weight and bias of the layer norm
pass
if self.loss_cls.use_sigmoid:
bias_init = bias_init_with_prob(0.01)
nn.init.constant_(self.fc_cls.bias, bias_init)
def forward(self, roi_feat: Tensor, proposal_feat: Tensor) -> tuple:
"""Forward function of Dynamic Instance Interactive Head.
Args:
roi_feat (Tensor): Roi-pooling features with shape
(batch_size*num_proposals, feature_dimensions,
pooling_h , pooling_w).
proposal_feat (Tensor): Intermediate feature get from
diihead in last stage, has shape
(batch_size, num_proposals, feature_dimensions)
Returns:
tuple[Tensor]: Usually a tuple of classification scores
and bbox prediction and a intermediate feature.
- cls_scores (Tensor): Classification scores for
all proposals, has shape
(batch_size, num_proposals, num_classes).
- bbox_preds (Tensor): Box energies / deltas for
all proposals, has shape
(batch_size, num_proposals, 4).
- obj_feat (Tensor): Object feature before classification
and regression subnet, has shape
(batch_size, num_proposal, feature_dimensions).
- attn_feats (Tensor): Intermediate feature.
"""
N, num_proposals = proposal_feat.shape[:2]
# Self attention
proposal_feat = proposal_feat.permute(1, 0, 2)
proposal_feat = self.attention_norm(self.attention(proposal_feat))
attn_feats = proposal_feat.permute(1, 0, 2)
# instance interactive
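        # Flatten (batch, num_proposals, C) to (batch * num_proposals, C) so
        # each proposal feature acts as the per-RoI parameters of the dynamic
        # convolution applied to its own pooled RoI feature.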
proposal_feat = attn_feats.reshape(-1, self.in_channels)
proposal_feat_iic = self.instance_interactive_conv(
proposal_feat, roi_feat)
proposal_feat = proposal_feat + self.instance_interactive_conv_dropout(
proposal_feat_iic)
obj_feat = self.instance_interactive_conv_norm(proposal_feat)
# FFN
obj_feat = self.ffn_norm(self.ffn(obj_feat))
cls_feat = obj_feat
reg_feat = obj_feat
for cls_layer in self.cls_fcs:
cls_feat = cls_layer(cls_feat)
for reg_layer in self.reg_fcs:
reg_feat = reg_layer(reg_feat)
cls_score = self.fc_cls(cls_feat).view(
N, num_proposals, self.num_classes
if self.loss_cls.use_sigmoid else self.num_classes + 1)
bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4)
return cls_score, bbox_delta, obj_feat.view(
N, num_proposals, self.in_channels), attn_feats
def loss_and_target(self,
cls_score: Tensor,
bbox_pred: Tensor,
sampling_results: List[SamplingResult],
rcnn_train_cfg: ConfigType,
imgs_whwh: Tensor,
concat: bool = True,
reduction_override: str = None) -> dict:
"""Calculate the loss based on the features extracted by the DIIHead.
Args:
cls_score (Tensor): Classification prediction
                results for all classes, has shape
(batch_size * num_proposals_single_image, num_classes)
bbox_pred (Tensor): Regression prediction results, has shape
(batch_size * num_proposals_single_image, 4), the last
dimension 4 represents [tl_x, tl_y, br_x, br_y].
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
            imgs_whwh (Tensor): Tensor with shape
                (batch_size, num_proposals, 4), the last
                dimension means
                [img_width, img_height, img_width, img_height].
concat (bool): Whether to concatenate the results of all
the images in a single batch. Defaults to True.
reduction_override (str, optional): The reduction
method used to override the original reduction
method of the loss. Options are "none",
"mean" and "sum". Defaults to None.
Returns:
dict: A dictionary of loss and targets components.
The targets are only used for cascade rcnn.
"""
cls_reg_targets = self.get_targets(
sampling_results=sampling_results,
rcnn_train_cfg=rcnn_train_cfg,
concat=concat)
(labels, label_weights, bbox_targets, bbox_weights) = cls_reg_targets
losses = dict()
bg_class_ind = self.num_classes
        # note in sparse rcnn num_gt == num_pos
pos_inds = (labels >= 0) & (labels < bg_class_ind)
num_pos = pos_inds.sum().float()
avg_factor = reduce_mean(num_pos)
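        # Average the number of positives across distributed workers so the
        # loss normalization stays consistent between GPUs.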
if cls_score is not None:
if cls_score.numel() > 0:
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['pos_acc'] = accuracy(cls_score[pos_inds],
labels[pos_inds])
if bbox_pred is not None:
# 0~self.num_classes-1 are FG, self.num_classes is BG
# do not perform bounding box regression for BG anymore.
if pos_inds.any():
pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0),
4)[pos_inds.type(torch.bool)]
imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0),
4)[pos_inds.type(torch.bool)]
losses['loss_bbox'] = self.loss_bbox(
pos_bbox_pred / imgs_whwh,
bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh,
bbox_weights[pos_inds.type(torch.bool)],
avg_factor=avg_factor)
losses['loss_iou'] = self.loss_iou(
pos_bbox_pred,
bbox_targets[pos_inds.type(torch.bool)],
bbox_weights[pos_inds.type(torch.bool)],
avg_factor=avg_factor)
else:
losses['loss_bbox'] = bbox_pred.sum() * 0
losses['loss_iou'] = bbox_pred.sum() * 0
return dict(loss_bbox=losses, bbox_targets=cls_reg_targets)
def _get_targets_single(self, pos_inds: Tensor, neg_inds: Tensor,
pos_priors: Tensor, neg_priors: Tensor,
pos_gt_bboxes: Tensor, pos_gt_labels: Tensor,
cfg: ConfigDict) -> tuple:
"""Calculate the ground truth for proposals in the single image
according to the sampling results.
        Almost the same as the implementation in `bbox_head`, except that
we add pos_inds and neg_inds to select positive and
negative samples instead of selecting the first num_pos
as positive samples.
Args:
pos_inds (Tensor): The length is equal to the
positive sample numbers contain all index
of the positive sample in the origin proposal set.
neg_inds (Tensor): The length is equal to the
negative sample numbers contain all index
of the negative sample in the origin proposal set.
pos_priors (Tensor): Contains all the positive boxes,
has shape (num_pos, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
neg_priors (Tensor): Contains all the negative boxes,
has shape (num_neg, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
pos_gt_bboxes (Tensor): Contains gt_boxes for
all positive samples, has shape (num_pos, 4),
the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
pos_gt_labels (Tensor): Contains gt_labels for
all positive samples, has shape (num_pos, ).
cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following Tensors:
- labels(Tensor): Gt_labels for all proposals, has
shape (num_proposals,).
- label_weights(Tensor): Labels_weights for all proposals, has
shape (num_proposals,).
- bbox_targets(Tensor):Regression target for all proposals, has
shape (num_proposals, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
- bbox_weights(Tensor):Regression weights for all proposals,
has shape (num_proposals, 4).
"""
num_pos = pos_priors.size(0)
num_neg = neg_priors.size(0)
num_samples = num_pos + num_neg
# original implementation uses new_zeros since BG are set to be 0
# now use empty & fill because BG cat_id = num_classes,
# FG cat_id = [0, num_classes-1]
labels = pos_priors.new_full((num_samples, ),
self.num_classes,
dtype=torch.long)
label_weights = pos_priors.new_zeros(num_samples)
bbox_targets = pos_priors.new_zeros(num_samples, 4)
bbox_weights = pos_priors.new_zeros(num_samples, 4)
if num_pos > 0:
labels[pos_inds] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[pos_inds] = pos_weight
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
pos_priors, pos_gt_bboxes)
else:
pos_bbox_targets = pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1
if num_neg > 0:
label_weights[neg_inds] = 1.0
return labels, label_weights, bbox_targets, bbox_weights
def get_targets(self,
sampling_results: List[SamplingResult],
rcnn_train_cfg: ConfigDict,
concat: bool = True) -> tuple:
"""Calculate the ground truth for all samples in a batch according to
the sampling_results.
        Almost the same as the implementation in bbox_head, except that we
        pass additional parameters pos_inds_list and neg_inds_list to the
        `_get_targets_single` function.
Args:
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
concat (bool): Whether to concatenate the results of all
the images in a single batch.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following list of Tensors:
- labels (list[Tensor],Tensor): Gt_labels for all
proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise just
a single tensor has shape (num_all_proposals,).
- label_weights (list[Tensor]): Labels_weights for
all proposals in a batch, each tensor in list has shape
(num_proposals,) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals,).
- bbox_targets (list[Tensor],Tensor): Regression target
for all proposals in a batch, each tensor in list has
shape (num_proposals, 4) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals, 4),
the last dimension 4 represents [tl_x, tl_y, br_x, br_y].
- bbox_weights (list[tensor],Tensor): Regression weights for
all proposals in a batch, each tensor in list has shape
(num_proposals, 4) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals, 4).
"""
pos_inds_list = [res.pos_inds for res in sampling_results]
neg_inds_list = [res.neg_inds for res in sampling_results]
pos_priors_list = [res.pos_priors for res in sampling_results]
neg_priors_list = [res.neg_priors for res in sampling_results]
pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
self._get_targets_single,
pos_inds_list,
neg_inds_list,
pos_priors_list,
neg_priors_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
| 19,266 | 44.548463 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple, Union
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from torch import Tensor
from mmdet.registry import MODELS
from .bbox_head import BBoxHead
@MODELS.register_module()
class ConvFCBBoxHead(BBoxHead):
r"""More general bbox head, with shared conv and fc layers and two optional
separated branches.
.. code-block:: none
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
""" # noqa: W605
def __init__(self,
num_shared_convs: int = 0,
num_shared_fcs: int = 0,
num_cls_convs: int = 0,
num_cls_fcs: int = 0,
num_reg_convs: int = 0,
num_reg_fcs: int = 0,
conv_out_channels: int = 256,
fc_out_channels: int = 1024,
conv_cfg: Optional[Union[dict, ConfigDict]] = None,
norm_cfg: Optional[Union[dict, ConfigDict]] = None,
init_cfg: Optional[Union[dict, ConfigDict]] = None,
*args,
**kwargs) -> None:
super().__init__(*args, init_cfg=init_cfg, **kwargs)
assert (num_shared_convs + num_shared_fcs + num_cls_convs +
num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
if not self.with_cls:
assert num_cls_convs == 0 and num_cls_fcs == 0
if not self.with_reg:
assert num_reg_convs == 0 and num_reg_fcs == 0
self.num_shared_convs = num_shared_convs
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# add shared convs and fcs
self.shared_convs, self.shared_fcs, last_layer_dim = \
self._add_conv_fc_branch(
self.num_shared_convs, self.num_shared_fcs, self.in_channels,
True)
self.shared_out_channels = last_layer_dim
# add cls specific branch
self.cls_convs, self.cls_fcs, self.cls_last_dim = \
self._add_conv_fc_branch(
self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
# add reg specific branch
self.reg_convs, self.reg_fcs, self.reg_last_dim = \
self._add_conv_fc_branch(
self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
if self.num_shared_fcs == 0 and not self.with_avg_pool:
if self.num_cls_fcs == 0:
self.cls_last_dim *= self.roi_feat_area
if self.num_reg_fcs == 0:
self.reg_last_dim *= self.roi_feat_area
self.relu = nn.ReLU(inplace=True)
# reconstruct fc_cls and fc_reg since input channels are changed
if self.with_cls:
if self.custom_cls_channels:
cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
else:
cls_channels = self.num_classes + 1
cls_predictor_cfg_ = self.cls_predictor_cfg.copy()
cls_predictor_cfg_.update(
in_features=self.cls_last_dim, out_features=cls_channels)
self.fc_cls = MODELS.build(cls_predictor_cfg_)
if self.with_reg:
box_dim = self.bbox_coder.encode_size
out_dim_reg = box_dim if self.reg_class_agnostic else \
box_dim * self.num_classes
reg_predictor_cfg_ = self.reg_predictor_cfg.copy()
if isinstance(reg_predictor_cfg_, (dict, ConfigDict)):
reg_predictor_cfg_.update(
in_features=self.reg_last_dim, out_features=out_dim_reg)
self.fc_reg = MODELS.build(reg_predictor_cfg_)
if init_cfg is None:
# when init_cfg is None,
# It has been set to
# [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],
# [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]
# after `super(ConvFCBBoxHead, self).__init__()`
# we only need to append additional configuration
# for `shared_fcs`, `cls_fcs` and `reg_fcs`
self.init_cfg += [
dict(
type='Xavier',
distribution='uniform',
override=[
dict(name='shared_fcs'),
dict(name='cls_fcs'),
dict(name='reg_fcs')
])
]
def _add_conv_fc_branch(self,
num_branch_convs: int,
num_branch_fcs: int,
in_channels: int,
is_shared: bool = False) -> tuple:
"""Add shared or separable branch.
convs -> avg pool (optional) -> fcs
"""
last_layer_dim = in_channels
# add branch specific conv layers
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
branch_fcs = nn.ModuleList()
if num_branch_fcs > 0:
# for shared branch, only consider self.with_avg_pool
# for separated branches, also consider self.num_shared_fcs
if (is_shared
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim
def forward(self, x: Tuple[Tensor]) -> tuple:
"""Forward features from the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: A tuple of classification scores and bbox prediction.
- cls_score (Tensor): Classification scores for all \
scale levels, each is a 4D-tensor, the channels number \
is num_base_priors * num_classes.
- bbox_pred (Tensor): Box energies / deltas for all \
scale levels, each is a 4D-tensor, the channels number \
is num_base_priors * 4.
"""
# shared part
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
# separate branches
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
@MODELS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels: int = 1024, *args, **kwargs) -> None:
super().__init__(
num_shared_convs=0,
num_shared_fcs=2,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
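# Illustrative usage (an assumption for readability, not part of the original
# file): a typical two-stage detector config would build this head through the
# registry, e.g.
#   bbox_head = MODELS.build(dict(
#       type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024,
#       roi_feat_size=7, num_classes=80))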
@MODELS.register_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels: int = 1024, *args, **kwargs) -> None:
super().__init__(
num_shared_convs=4,
num_shared_fcs=1,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
| 9,510 | 37.044 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule, ModuleList
from torch import Tensor
from mmdet.models.backbones.resnet import Bottleneck
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, MultiConfig, OptConfigType, OptMultiConfig
from .bbox_head import BBoxHead
class BasicResBlock(BaseModule):
"""Basic residual block.
This block is a little different from the block in the ResNet backbone.
The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.
Args:
in_channels (int): Channels of the input feature map.
out_channels (int): Channels of the output feature map.
conv_cfg (:obj:`ConfigDict` or dict, optional): The config dict
for convolution layers.
norm_cfg (:obj:`ConfigDict` or dict): The config dict for
normalization layers.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None
"""
def __init__(self,
in_channels: int,
out_channels: int,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
# main path
self.conv1 = ConvModule(
in_channels,
in_channels,
kernel_size=3,
padding=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
self.conv2 = ConvModule(
in_channels,
out_channels,
kernel_size=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
# identity path
self.conv_identity = ConvModule(
in_channels,
out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
identity = x
x = self.conv1(x)
x = self.conv2(x)
identity = self.conv_identity(identity)
out = x + identity
out = self.relu(out)
return out
@MODELS.register_module()
class DoubleConvFCBBoxHead(BBoxHead):
r"""Bbox head used in Double-Head R-CNN
.. code-block:: none
/-> cls
/-> shared convs ->
\-> reg
roi features
/-> cls
\-> shared fc ->
\-> reg
""" # noqa: W605
def __init__(self,
num_convs: int = 0,
num_fcs: int = 0,
conv_out_channels: int = 1024,
fc_out_channels: int = 1024,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
init_cfg: MultiConfig = dict(
type='Normal',
override=[
dict(type='Normal', name='fc_cls', std=0.01),
dict(type='Normal', name='fc_reg', std=0.001),
dict(
type='Xavier',
name='fc_branch',
distribution='uniform')
]),
**kwargs) -> None:
kwargs.setdefault('with_avg_pool', True)
super().__init__(init_cfg=init_cfg, **kwargs)
assert self.with_avg_pool
assert num_convs > 0
assert num_fcs > 0
self.num_convs = num_convs
self.num_fcs = num_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# increase the channel of input features
self.res_block = BasicResBlock(self.in_channels,
self.conv_out_channels)
# add conv heads
self.conv_branch = self._add_conv_branch()
# add fc heads
self.fc_branch = self._add_fc_branch()
out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
self.relu = nn.ReLU()
    def _add_conv_branch(self) -> ModuleList:
        """Add the conv branch which consists of a sequence of conv layers."""
branch_convs = ModuleList()
for i in range(self.num_convs):
branch_convs.append(
Bottleneck(
inplanes=self.conv_out_channels,
planes=self.conv_out_channels // 4,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
return branch_convs
    def _add_fc_branch(self) -> ModuleList:
        """Add the fc branch which consists of a sequence of fc layers."""
branch_fcs = ModuleList()
for i in range(self.num_fcs):
fc_in_channels = (
self.in_channels *
self.roi_feat_area if i == 0 else self.fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
return branch_fcs
def forward(self, x_cls: Tensor, x_reg: Tensor) -> Tuple[Tensor]:
"""Forward features from the upstream network.
Args:
x_cls (Tensor): Classification features of rois
x_reg (Tensor): Regression features from the upstream network.
Returns:
tuple: A tuple of classification scores and bbox prediction.
- cls_score (Tensor): Classification score predictions of rois.
each roi predicts num_classes + 1 channels.
- bbox_pred (Tensor): BBox deltas predictions of rois. each roi
predicts 4 * num_classes channels.
"""
# conv head
x_conv = self.res_block(x_reg)
for conv in self.conv_branch:
x_conv = conv(x_conv)
if self.with_avg_pool:
x_conv = self.avg_pool(x_conv)
x_conv = x_conv.view(x_conv.size(0), -1)
bbox_pred = self.fc_reg(x_conv)
# fc head
x_fc = x_cls.view(x_cls.size(0), -1)
for fc in self.fc_branch:
x_fc = self.relu(fc(x_fc))
cls_score = self.fc_cls(x_fc)
return cls_score, bbox_pred
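# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A minimal stand-alone approximation of the Double-Head split implemented by
# DoubleConvFCBBoxHead.forward: the conv branch (ending in global average
# pooling) feeds the box regressor, while the flattened fc branch feeds the
# classifier. Sizes are illustrative assumptions.
if __name__ == '__main__':
    import torch
    import torch.nn as nn
    num_rois, in_channels, roi_feat_size = 4, 256, 7
    conv_out, fc_out, num_classes = 1024, 1024, 80
    x_reg = torch.rand(num_rois, conv_out, roi_feat_size, roi_feat_size)
    x_cls = torch.rand(num_rois, in_channels, roi_feat_size, roi_feat_size)
    # conv branch -> bbox deltas
    x_conv = nn.AdaptiveAvgPool2d(1)(x_reg).flatten(1)
    bbox_pred = nn.Linear(conv_out, 4 * num_classes)(x_conv)
    # fc branch -> classification scores
    x_fc = torch.relu(
        nn.Linear(in_channels * roi_feat_size**2, fc_out)(x_cls.flatten(1)))
    cls_score = nn.Linear(fc_out, num_classes + 1)(x_fc)
    assert bbox_pred.shape == (num_rois, 4 * num_classes)
    assert cls_score.shape == (num_rois, num_classes + 1)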
| 6,769 | 32.85 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/shared_heads/res_layer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmengine.model import BaseModule
from mmdet.models.backbones import ResNet
from mmdet.models.layers import ResLayer as _ResLayer
from mmdet.registry import MODELS
@MODELS.register_module()
class ResLayer(BaseModule):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None,
pretrained=None,
init_cfg=None):
super(ResLayer, self).__init__(init_cfg)
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = _ResLayer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module(f'layer{stage + 1}', res_layer)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
res_layer = getattr(self, f'layer{self.stage + 1}')
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
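# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A quick check of the channel arithmetic used above, assuming a ResNet-50
# backbone (Bottleneck blocks, expansion 4) and the default stage=3: the
# shared head consumes 1024-channel stage inputs and produces
# planes * expansion = 2048-channel features.
if __name__ == '__main__':
    stage, expansion = 3, 4  # ResNet-50/101 use Bottleneck with expansion 4
    planes = 64 * 2**stage                       # 512
    inplanes = 64 * 2**(stage - 1) * expansion   # 1024
    out_channels = planes * expansion            # 2048
    assert (planes, inplanes, out_channels) == (512, 1024, 2048)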
| 2,545 | 30.825 | 76 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/shared_heads/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .res_layer import ResLayer
__all__ = ['ResLayer']
| 104 | 20 | 47 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/grid_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType
@MODELS.register_module()
class GridHead(BaseModule):
"""Implementation of `Grid Head <https://arxiv.org/abs/1811.12030>`_
Args:
grid_points (int): The number of grid points. Defaults to 9.
num_convs (int): The number of convolution layers. Defaults to 8.
roi_feat_size (int): RoI feature size. Default to 14.
in_channels (int): The channel number of inputs features.
Defaults to 256.
conv_kernel_size (int): The kernel size of convolution layers.
Defaults to 3.
point_feat_channels (int): The number of channels of each point
features. Defaults to 64.
        class_agnostic (bool): Whether to use class agnostic classification.
If so, the output channels of logits will be 1. Defaults to False.
loss_grid (:obj:`ConfigDict` or dict): Config of grid loss.
        conv_cfg (:obj:`ConfigDict` or dict, optional): dictionary to
construct and config conv layer.
norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and
config norm layer.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict]): Initialization config dict.
"""
def __init__(
self,
grid_points: int = 9,
num_convs: int = 8,
roi_feat_size: int = 14,
in_channels: int = 256,
conv_kernel_size: int = 3,
point_feat_channels: int = 64,
deconv_kernel_size: int = 4,
class_agnostic: bool = False,
loss_grid: ConfigType = dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15),
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='GN', num_groups=36),
init_cfg: MultiConfig = [
dict(type='Kaiming', layer=['Conv2d', 'Linear']),
dict(
type='Normal',
layer='ConvTranspose2d',
std=0.001,
override=dict(
type='Normal',
name='deconv2',
std=0.001,
bias=-np.log(0.99 / 0.01)))
]
) -> None:
super().__init__(init_cfg=init_cfg)
self.grid_points = grid_points
self.num_convs = num_convs
self.roi_feat_size = roi_feat_size
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.point_feat_channels = point_feat_channels
self.conv_out_channels = self.point_feat_channels * self.grid_points
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
assert self.conv_out_channels % norm_cfg['num_groups'] == 0
assert self.grid_points >= 4
self.grid_size = int(np.sqrt(self.grid_points))
if self.grid_size * self.grid_size != self.grid_points:
raise ValueError('grid_points must be a square number')
# the predicted heatmap is half of whole_map_size
if not isinstance(self.roi_feat_size, int):
            raise ValueError('Only square RoIs are supported in Grid R-CNN')
self.whole_map_size = self.roi_feat_size * 4
# compute point-wise sub-regions
self.sub_regions = self.calc_sub_regions()
self.convs = []
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
stride = 2 if i == 0 else 1
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
stride=stride,
padding=padding,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=True))
self.convs = nn.Sequential(*self.convs)
self.deconv1 = nn.ConvTranspose2d(
self.conv_out_channels,
self.conv_out_channels,
kernel_size=deconv_kernel_size,
stride=2,
padding=(deconv_kernel_size - 2) // 2,
groups=grid_points)
self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
self.deconv2 = nn.ConvTranspose2d(
self.conv_out_channels,
grid_points,
kernel_size=deconv_kernel_size,
stride=2,
padding=(deconv_kernel_size - 2) // 2,
groups=grid_points)
# find the 4-neighbor of each grid point
self.neighbor_points = []
grid_size = self.grid_size
for i in range(grid_size): # i-th column
for j in range(grid_size): # j-th row
neighbors = []
if i > 0: # left: (i - 1, j)
neighbors.append((i - 1) * grid_size + j)
if j > 0: # up: (i, j - 1)
neighbors.append(i * grid_size + j - 1)
if j < grid_size - 1: # down: (i, j + 1)
neighbors.append(i * grid_size + j + 1)
if i < grid_size - 1: # right: (i + 1, j)
neighbors.append((i + 1) * grid_size + j)
self.neighbor_points.append(tuple(neighbors))
# total edges in the grid
self.num_edges = sum([len(p) for p in self.neighbor_points])
self.forder_trans = nn.ModuleList() # first-order feature transition
self.sorder_trans = nn.ModuleList() # second-order feature transition
for neighbors in self.neighbor_points:
fo_trans = nn.ModuleList()
so_trans = nn.ModuleList()
for _ in range(len(neighbors)):
# each transition module consists of a 5x5 depth-wise conv and
# 1x1 conv.
fo_trans.append(
nn.Sequential(
nn.Conv2d(
self.point_feat_channels,
self.point_feat_channels,
5,
stride=1,
padding=2,
groups=self.point_feat_channels),
nn.Conv2d(self.point_feat_channels,
self.point_feat_channels, 1)))
so_trans.append(
nn.Sequential(
nn.Conv2d(
self.point_feat_channels,
self.point_feat_channels,
5,
1,
2,
groups=self.point_feat_channels),
nn.Conv2d(self.point_feat_channels,
self.point_feat_channels, 1)))
self.forder_trans.append(fo_trans)
self.sorder_trans.append(so_trans)
self.loss_grid = MODELS.build(loss_grid)
def forward(self, x: Tensor) -> Dict[str, Tensor]:
"""forward function of ``GridHead``.
Args:
x (Tensor): RoI features, has shape
(num_rois, num_channels, roi_feat_size, roi_feat_size).
Returns:
Dict[str, Tensor]: Return a dict including fused and unfused
heatmap.
"""
assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
# RoI feature transformation, downsample 2x
x = self.convs(x)
c = self.point_feat_channels
# first-order fusion
x_fo = [None for _ in range(self.grid_points)]
for i, points in enumerate(self.neighbor_points):
x_fo[i] = x[:, i * c:(i + 1) * c]
for j, point_idx in enumerate(points):
x_fo[i] = x_fo[i] + self.forder_trans[i][j](
x[:, point_idx * c:(point_idx + 1) * c])
# second-order fusion
x_so = [None for _ in range(self.grid_points)]
for i, points in enumerate(self.neighbor_points):
x_so[i] = x[:, i * c:(i + 1) * c]
for j, point_idx in enumerate(points):
x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])
# predicted heatmap with fused features
x2 = torch.cat(x_so, dim=1)
x2 = self.deconv1(x2)
x2 = F.relu(self.norm1(x2), inplace=True)
heatmap = self.deconv2(x2)
# predicted heatmap with original features (applicable during training)
if self.training:
x1 = x
x1 = self.deconv1(x1)
x1 = F.relu(self.norm1(x1), inplace=True)
heatmap_unfused = self.deconv2(x1)
else:
heatmap_unfused = heatmap
return dict(fused=heatmap, unfused=heatmap_unfused)
def calc_sub_regions(self) -> List[Tuple[float]]:
"""Compute point specific representation regions.
See `Grid R-CNN Plus <https://arxiv.org/abs/1906.05688>`_ for details.
"""
# to make it consistent with the original implementation, half_size
# is computed as 2 * quarter_size, which is smaller
half_size = self.whole_map_size // 4 * 2
sub_regions = []
for i in range(self.grid_points):
x_idx = i // self.grid_size
y_idx = i % self.grid_size
if x_idx == 0:
sub_x1 = 0
elif x_idx == self.grid_size - 1:
sub_x1 = half_size
else:
ratio = x_idx / (self.grid_size - 1) - 0.25
sub_x1 = max(int(ratio * self.whole_map_size), 0)
if y_idx == 0:
sub_y1 = 0
elif y_idx == self.grid_size - 1:
sub_y1 = half_size
else:
ratio = y_idx / (self.grid_size - 1) - 0.25
sub_y1 = max(int(ratio * self.whole_map_size), 0)
sub_regions.append(
(sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
return sub_regions
def get_targets(self, sampling_results: List[SamplingResult],
rcnn_train_cfg: ConfigDict) -> Tensor:
"""Calculate the ground truth for all samples in a batch according to
        the sampling_results.
Args:
sampling_results (List[:obj:`SamplingResult`]): Assign results of
all images in a batch after sampling.
rcnn_train_cfg (:obj:`ConfigDict`): `train_cfg` of RCNN.
Returns:
Tensor: Grid heatmap targets.
"""
# mix all samples (across images) together.
pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
dim=0).cpu()
pos_gt_bboxes = torch.cat(
[res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
assert pos_bboxes.shape == pos_gt_bboxes.shape
# expand pos_bboxes to 2x of original size
x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)
num_rois = pos_bboxes.shape[0]
map_size = self.whole_map_size
# this is not the final target shape
targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),
dtype=torch.float)
# pre-compute interpolation factors for all grid points.
# the first item is the factor of x-dim, and the second is y-dim.
# for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
factors = []
for j in range(self.grid_points):
x_idx = j // self.grid_size
y_idx = j % self.grid_size
factors.append((1 - x_idx / (self.grid_size - 1),
1 - y_idx / (self.grid_size - 1)))
radius = rcnn_train_cfg.pos_radius
radius2 = radius**2
for i in range(num_rois):
# ignore small bboxes
if (pos_bbox_ws[i] <= self.grid_size
or pos_bbox_hs[i] <= self.grid_size):
continue
# for each grid point, mark a small circle as positive
for j in range(self.grid_points):
factor_x, factor_y = factors[j]
gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
1 - factor_x) * pos_gt_bboxes[i, 2]
gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
1 - factor_y) * pos_gt_bboxes[i, 3]
cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
map_size)
cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
map_size)
for x in range(cx - radius, cx + radius + 1):
for y in range(cy - radius, cy + radius + 1):
if x >= 0 and x < map_size and y >= 0 and y < map_size:
if (x - cx)**2 + (y - cy)**2 <= radius2:
targets[i, j, y, x] = 1
# reduce the target heatmap size by a half
# proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
sub_targets = []
for i in range(self.grid_points):
sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
sub_targets = torch.cat(sub_targets, dim=1)
sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)
return sub_targets
    def loss(self, grid_pred: Dict[str, Tensor], sample_idx: Tensor,
sampling_results: List[SamplingResult],
rcnn_train_cfg: ConfigDict) -> dict:
"""Calculate the loss based on the features extracted by the grid head.
Args:
grid_pred (dict[str, Tensor]): Outputs of grid_head forward.
sample_idx (Tensor): The sampling index of ``grid_pred``.
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.
Returns:
dict: A dictionary of loss and targets components.
"""
grid_targets = self.get_targets(sampling_results, rcnn_train_cfg)
grid_targets = grid_targets[sample_idx]
loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
loss_grid = loss_fused + loss_unfused
return dict(loss_grid=loss_grid)
def predict_by_feat(self,
grid_preds: Dict[str, Tensor],
results_list: List[InstanceData],
batch_img_metas: List[dict],
rescale: bool = False) -> InstanceList:
"""Adjust the predicted bboxes from bbox head.
Args:
grid_preds (dict[str, Tensor]): dictionary outputted by forward
function.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
batch_img_metas (list[dict]): List of image information.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process. Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape \
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4), the last \
dimension 4 arrange as (x1, y1, x2, y2).
"""
num_roi_per_img = tuple(res.bboxes.size(0) for res in results_list)
grid_preds = {
k: v.split(num_roi_per_img, 0)
for k, v in grid_preds.items()
}
for i, results in enumerate(results_list):
if len(results) != 0:
bboxes = self._predict_by_feat_single(
grid_pred=grid_preds['fused'][i],
bboxes=results.bboxes,
img_meta=batch_img_metas[i],
rescale=rescale)
results.bboxes = bboxes
return results_list
def _predict_by_feat_single(self,
grid_pred: Tensor,
bboxes: Tensor,
img_meta: dict,
rescale: bool = False) -> Tensor:
"""Adjust ``bboxes`` according to ``grid_pred``.
Args:
grid_pred (Tensor): Grid fused heatmap.
bboxes (Tensor): Predicted bboxes, has shape (n, 4)
img_meta (dict): image information.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
Tensor: adjusted bboxes.
"""
assert bboxes.size(0) == grid_pred.size(0)
grid_pred = grid_pred.sigmoid()
R, c, h, w = grid_pred.shape
half_size = self.whole_map_size // 4 * 2
assert h == w == half_size
assert c == self.grid_points
# find the point with max scores in the half-sized heatmap
grid_pred = grid_pred.view(R * c, h * w)
pred_scores, pred_position = grid_pred.max(dim=1)
xs = pred_position % w
ys = pred_position // w
# get the position in the whole heatmap instead of half-sized heatmap
for i in range(self.grid_points):
xs[i::self.grid_points] += self.sub_regions[i][0]
ys[i::self.grid_points] += self.sub_regions[i][1]
# reshape to (num_rois, grid_points)
pred_scores, xs, ys = tuple(
map(lambda x: x.view(R, c), [pred_scores, xs, ys]))
# get expanded pos_bboxes
widths = (bboxes[:, 2] - bboxes[:, 0]).unsqueeze(-1)
heights = (bboxes[:, 3] - bboxes[:, 1]).unsqueeze(-1)
x1 = (bboxes[:, 0, None] - widths / 2)
y1 = (bboxes[:, 1, None] - heights / 2)
# map the grid point to the absolute coordinates
abs_xs = (xs.float() + 0.5) / w * widths + x1
abs_ys = (ys.float() + 0.5) / h * heights + y1
# get the grid points indices that fall on the bbox boundaries
x1_inds = [i for i in range(self.grid_size)]
y1_inds = [i * self.grid_size for i in range(self.grid_size)]
x2_inds = [
self.grid_points - self.grid_size + i
for i in range(self.grid_size)
]
y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]
# voting of all grid points on some boundary
bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, y2_inds].sum(dim=1, keepdim=True))
bboxes = torch.cat([bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2], dim=1)
bboxes[:, [0, 2]].clamp_(min=0, max=img_meta['img_shape'][1])
bboxes[:, [1, 3]].clamp_(min=0, max=img_meta['img_shape'][0])
if rescale:
assert img_meta.get('scale_factor') is not None
bboxes /= bboxes.new_tensor(img_meta['scale_factor']).repeat(
(1, 2))
return bboxes
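# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A stand-alone reproduction of the grid-point geometry used above, assuming
# the defaults grid_points=9 (a 3x3 grid) and roi_feat_size=14, so that
# whole_map_size = 56 and every grid point owns a half-sized 28x28 sub-region.
if __name__ == '__main__':
    grid_points, roi_feat_size = 9, 14
    grid_size = int(grid_points**0.5)    # 3
    whole_map_size = roi_feat_size * 4   # 56
    half_size = whole_map_size // 4 * 2  # 28

    def region_start(idx):
        # mirrors the per-axis branch logic of GridHead.calc_sub_regions
        if idx == 0:
            return 0
        if idx == grid_size - 1:
            return half_size
        ratio = idx / (grid_size - 1) - 0.25
        return max(int(ratio * whole_map_size), 0)

    sub_regions = []
    for i in range(grid_points):
        x1 = region_start(i // grid_size)
        y1 = region_start(i % grid_size)
        sub_regions.append((x1, y1, x1 + half_size, y1 + half_size))
    assert sub_regions[0] == (0, 0, 28, 28)    # corner point
    assert sub_regions[4] == (14, 14, 42, 42)  # centre point
    assert sub_regions[8] == (28, 28, 56, 56)  # opposite corner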
| 20,849 | 41.464358 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch
import torch.nn as nn
from mmengine.config import ConfigDict
from torch import Tensor
from mmdet.models.task_modules import SamplingResult
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, InstanceList, OptConfigType, reduce_mean
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class DynamicMaskHead(FCNMaskHead):
r"""Dynamic Mask Head for
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_
Args:
num_convs (int): Number of convolution layer.
Defaults to 4.
roi_feat_size (int): The output size of RoI extractor,
Defaults to 14.
in_channels (int): Input feature channels.
Defaults to 256.
conv_kernel_size (int): Kernel size of convolution layers.
Defaults to 3.
conv_out_channels (int): Output channels of convolution layers.
Defaults to 256.
num_classes (int): Number of classes.
Defaults to 80
        class_agnostic (bool): Whether to generate class agnostic prediction.
Defaults to False.
dropout (float): Probability of drop the channel.
Defaults to 0.0
upsample_cfg (:obj:`ConfigDict` or dict): The config for
upsample layer.
conv_cfg (:obj:`ConfigDict` or dict, optional): The convolution
layer config.
norm_cfg (:obj:`ConfigDict` or dict, optional): The norm layer config.
dynamic_conv_cfg (:obj:`ConfigDict` or dict): The dynamic convolution
layer config.
loss_mask (:obj:`ConfigDict` or dict): The config for mask loss.
"""
def __init__(self,
num_convs: int = 4,
roi_feat_size: int = 14,
in_channels: int = 256,
conv_kernel_size: int = 3,
conv_out_channels: int = 256,
num_classes: int = 80,
class_agnostic: bool = False,
upsample_cfg: ConfigType = dict(
type='deconv', scale_factor=2),
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
dynamic_conv_cfg: ConfigType = dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=14,
with_proj=False,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_mask: ConfigType = dict(
type='DiceLoss', loss_weight=8.0),
**kwargs) -> None:
super().__init__(
num_convs=num_convs,
roi_feat_size=roi_feat_size,
in_channels=in_channels,
conv_kernel_size=conv_kernel_size,
conv_out_channels=conv_out_channels,
num_classes=num_classes,
class_agnostic=class_agnostic,
upsample_cfg=upsample_cfg,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
loss_mask=loss_mask,
**kwargs)
assert class_agnostic is False, \
'DynamicMaskHead only support class_agnostic=False'
self.fp16_enabled = False
self.instance_interactive_conv = MODELS.build(dynamic_conv_cfg)
def init_weights(self) -> None:
"""Use xavier initialization for all weight parameter and set
classification head bias as a specific value when use focal loss."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
nn.init.constant_(self.conv_logits.bias, 0.)
def forward(self, roi_feat: Tensor, proposal_feat: Tensor) -> Tensor:
"""Forward function of DynamicMaskHead.
Args:
roi_feat (Tensor): Roi-pooling features with shape
(batch_size*num_proposals, feature_dimensions,
pooling_h , pooling_w).
proposal_feat (Tensor): Intermediate feature get from
diihead in last stage, has shape
(batch_size*num_proposals, feature_dimensions)
Returns:
mask_preds (Tensor): Predicted foreground masks with shape
(batch_size*num_proposals, num_classes, pooling_h*2, pooling_w*2).
"""
proposal_feat = proposal_feat.reshape(-1, self.in_channels)
proposal_feat_iic = self.instance_interactive_conv(
proposal_feat, roi_feat)
x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size())
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_preds = self.conv_logits(x)
return mask_preds
def loss_and_target(self, mask_preds: Tensor,
sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList,
rcnn_train_cfg: ConfigDict) -> dict:
"""Calculate the loss based on the features extracted by the mask head.
Args:
mask_preds (Tensor): Predicted foreground masks, has shape
(num_pos, num_classes, h, w).
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
Returns:
dict: A dictionary of loss and targets components.
"""
mask_targets = self.get_targets(
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
rcnn_train_cfg=rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
num_pos = pos_labels.new_ones(pos_labels.size()).float().sum()
avg_factor = torch.clamp(reduce_mean(num_pos), min=1.).item()
loss = dict()
if mask_preds.size(0) == 0:
loss_mask = mask_preds.sum()
else:
loss_mask = self.loss_mask(
mask_preds[torch.arange(num_pos).long(), pos_labels,
...].sigmoid(),
mask_targets,
avg_factor=avg_factor)
loss['loss_mask'] = loss_mask
return dict(loss_mask=loss, mask_targets=mask_targets)
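# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The loss above selects the class-specific mask channel of every positive
# sample via mask_preds[arange(num_pos), pos_labels]. A tiny stand-alone
# example of that gather, with made-up sizes:
if __name__ == '__main__':
    import torch
    num_pos, num_classes, mask_size = 5, 80, 28
    mask_preds = torch.rand(num_pos, num_classes, mask_size, mask_size)
    pos_labels = torch.randint(0, num_classes, (num_pos, ))
    per_roi_masks = mask_preds[torch.arange(num_pos), pos_labels]
    assert per_roi_masks.shape == (num_pos, mask_size, mask_size)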
| 6,739 | 39.359281 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmengine.model import ModuleList
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
the input feature map instead of upsample it.
Args:
num_convs (int): Number of conv layers in the head. Defaults to 0.
num_fcs (int): Number of fc layers in the head. Defaults to 2.
fc_out_channels (int): Number of output channels of fc layer.
Defaults to 1024.
downsample_factor (int): The factor that feature map is downsampled by.
Defaults to 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs: int = 0,
num_fcs: int = 2,
fc_out_channels: int = 1024,
downsample_factor: int = 2,
init_cfg: MultiConfig = dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg) -> None:
super().__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
# remove conv_logit
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self) -> None:
"""Initialize weights."""
super(FCNMaskHead, self).init_weights()
def forward(self, x: Tensor) -> Tensor:
"""Forward features from the upstream network.
Args:
x (Tensor): Extract mask RoI features.
Returns:
Tensor: Predicted foreground masks.
"""
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_preds = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_preds
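# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Shape bookkeeping of the fc-based mask prediction above, assuming the
# defaults roi_feat_size=14, downsample_factor=2 and num_classes=80: the fc
# logits are reshaped back into a (num_classes, 7, 7) coarse mask per RoI.
if __name__ == '__main__':
    import torch
    num_rois, num_classes = 3, 80
    roi_feat_size, downsample_factor = 14, 2
    output_size = roi_feat_size // downsample_factor          # 7
    output_area = output_size * output_size                   # 49
    logits = torch.rand(num_rois, num_classes * output_area)  # fc_logits output
    mask_preds = logits.view(num_rois, num_classes, output_size, output_size)
    assert mask_preds.shape == (num_rois, 80, 7, 7)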
| 3,887 | 34.027027 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/maskiou_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import Conv2d, Linear, MaxPool2d
from mmengine.config import ConfigDict
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import Tensor
from torch.nn.modules.utils import _pair
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, InstanceList, OptMultiConfig
@MODELS.register_module()
class MaskIoUHead(BaseModule):
"""Mask IoU Head.
This head predicts the IoU of predicted masks and corresponding gt masks.
Args:
num_convs (int): The number of convolution layers. Defaults to 4.
num_fcs (int): The number of fully connected layers. Defaults to 2.
roi_feat_size (int): RoI feature size. Default to 14.
in_channels (int): The channel number of inputs features.
Defaults to 256.
conv_out_channels (int): The feature channels of convolution layers.
Defaults to 256.
fc_out_channels (int): The feature channels of fully connected layers.
Defaults to 1024.
num_classes (int): Number of categories excluding the background
category. Defaults to 80.
loss_iou (:obj:`ConfigDict` or dict): IoU loss.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict.
"""
def __init__(
self,
num_convs: int = 4,
num_fcs: int = 2,
roi_feat_size: int = 14,
in_channels: int = 256,
conv_out_channels: int = 256,
fc_out_channels: int = 1024,
num_classes: int = 80,
loss_iou: ConfigType = dict(type='MSELoss', loss_weight=0.5),
init_cfg: OptMultiConfig = [
dict(type='Kaiming', override=dict(name='convs')),
dict(type='Caffe2Xavier', override=dict(name='fcs')),
dict(type='Normal', std=0.01, override=dict(name='fc_mask_iou'))
]
) -> None:
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.num_classes = num_classes
self.convs = nn.ModuleList()
for i in range(num_convs):
if i == 0:
# concatenation of mask feature and mask prediction
in_channels = self.in_channels + 1
else:
in_channels = self.conv_out_channels
stride = 2 if i == num_convs - 1 else 1
self.convs.append(
Conv2d(
in_channels,
self.conv_out_channels,
3,
stride=stride,
padding=1))
roi_feat_size = _pair(roi_feat_size)
pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)
self.fcs = nn.ModuleList()
for i in range(num_fcs):
in_channels = (
self.conv_out_channels *
pooled_area if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(in_channels, self.fc_out_channels))
self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)
self.relu = nn.ReLU()
self.max_pool = MaxPool2d(2, 2)
self.loss_iou = MODELS.build(loss_iou)
def forward(self, mask_feat: Tensor, mask_preds: Tensor) -> Tensor:
"""Forward function.
Args:
mask_feat (Tensor): Mask features from upstream models.
mask_preds (Tensor): Mask predictions from mask head.
Returns:
Tensor: Mask IoU predictions.
"""
mask_preds = mask_preds.sigmoid()
mask_pred_pooled = self.max_pool(mask_preds.unsqueeze(1))
x = torch.cat((mask_feat, mask_pred_pooled), 1)
for conv in self.convs:
x = self.relu(conv(x))
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_iou = self.fc_mask_iou(x)
return mask_iou
def loss_and_target(self, mask_iou_pred: Tensor, mask_preds: Tensor,
mask_targets: Tensor,
sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList,
rcnn_train_cfg: ConfigDict) -> dict:
"""Calculate the loss and targets of MaskIoUHead.
Args:
mask_iou_pred (Tensor): Mask IoU predictions results, has shape
(num_pos, num_classes)
mask_preds (Tensor): Mask predictions from mask head, has shape
(num_pos, mask_size, mask_size).
mask_targets (Tensor): The ground truth masks assigned with
predictions, has shape
(num_pos, mask_size, mask_size).
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It includes ``masks`` inside.
rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.
Returns:
dict: A dictionary of loss and targets components.
The targets are only used for cascade rcnn.
"""
mask_iou_targets = self.get_targets(
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
mask_preds=mask_preds,
mask_targets=mask_targets,
rcnn_train_cfg=rcnn_train_cfg)
pos_inds = mask_iou_targets > 0
if pos_inds.sum() > 0:
loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],
mask_iou_targets[pos_inds])
else:
loss_mask_iou = mask_iou_pred.sum() * 0
return dict(loss_mask_iou=loss_mask_iou)
def get_targets(self, sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList, mask_preds: Tensor,
mask_targets: Tensor,
rcnn_train_cfg: ConfigDict) -> Tensor:
"""Compute target of mask IoU.
Mask IoU target is the IoU of the predicted mask (inside a bbox) and
        the gt mask of the corresponding instance (the whole instance).
The intersection area is computed inside the bbox, and the gt mask area
is computed with two steps, firstly we compute the gt area inside the
bbox, then divide it by the area ratio of gt area inside the bbox and
the gt area of the whole instance.
Args:
sampling_results (list[:obj:`SamplingResult`]): sampling results.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It includes ``masks`` inside.
mask_preds (Tensor): Predicted masks of each positive proposal,
shape (num_pos, h, w).
mask_targets (Tensor): Gt mask of each positive proposal,
binary map of the shape (num_pos, h, w).
rcnn_train_cfg (obj:`ConfigDict`): Training config for R-CNN part.
Returns:
Tensor: mask iou target (length == num positive).
"""
pos_proposals = [res.pos_priors for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
gt_masks = [res.masks for res in batch_gt_instances]
# compute the area ratio of gt areas inside the proposals and
# the whole instance
area_ratios = map(self._get_area_ratio, pos_proposals,
pos_assigned_gt_inds, gt_masks)
area_ratios = torch.cat(list(area_ratios))
assert mask_targets.size(0) == area_ratios.size(0)
mask_preds = (mask_preds > rcnn_train_cfg.mask_thr_binary).float()
mask_pred_areas = mask_preds.sum((-1, -2))
# mask_preds and mask_targets are binary maps
overlap_areas = (mask_preds * mask_targets).sum((-1, -2))
# compute the mask area of the whole instance
gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)
mask_iou_targets = overlap_areas / (
mask_pred_areas + gt_full_areas - overlap_areas)
return mask_iou_targets
def _get_area_ratio(self, pos_proposals: Tensor,
pos_assigned_gt_inds: Tensor,
gt_masks: InstanceData) -> Tensor:
"""Compute area ratio of the gt mask inside the proposal and the gt
mask of the corresponding instance.
Args:
pos_proposals (Tensor): Positive proposals, has shape (num_pos, 4).
pos_assigned_gt_inds (Tensor): positive proposals assigned ground
truth index.
gt_masks (BitmapMask or PolygonMask): Gt masks (the whole instance)
of each image, with the same shape of the input image.
Returns:
Tensor: The area ratio of the gt mask inside the proposal and the
gt mask of the corresponding instance.
"""
num_pos = pos_proposals.size(0)
if num_pos > 0:
area_ratios = []
proposals_np = pos_proposals.cpu().numpy()
pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
# compute mask areas of gt instances (batch processing for speedup)
gt_instance_mask_area = gt_masks.areas
for i in range(num_pos):
gt_mask = gt_masks[pos_assigned_gt_inds[i]]
# crop the gt mask inside the proposal
bbox = proposals_np[i, :].astype(np.int32)
gt_mask_in_proposal = gt_mask.crop(bbox)
ratio = gt_mask_in_proposal.areas[0] / (
gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
area_ratios.append(ratio)
area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
pos_proposals.device)
else:
area_ratios = pos_proposals.new_zeros((0, ))
return area_ratios
def predict_by_feat(self, mask_iou_preds: Tuple[Tensor],
results_list: InstanceList) -> InstanceList:
"""Predict the mask iou and calculate it into ``results.scores``.
Args:
mask_iou_preds (Tensor): Mask IoU predictions results, has shape
(num_proposals, num_classes)
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process. Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert len(mask_iou_preds) == len(results_list)
for results, mask_iou_pred in zip(results_list, mask_iou_preds):
labels = results.labels
scores = results.scores
results.scores = scores * mask_iou_pred[range(labels.size(0)),
labels]
return results_list
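# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Stand-alone version of the mask IoU target formula in get_targets: the gt
# area of the whole instance is recovered from the in-box gt area and the
# area ratio, then a standard IoU is computed. The numbers are made up.
if __name__ == '__main__':
    import torch
    mask_pred_areas = torch.tensor([300.])  # binarized predicted mask area
    overlap_areas = torch.tensor([250.])    # intersection inside the box
    gt_in_box_areas = torch.tensor([280.])  # gt mask area inside the box
    area_ratios = torch.tensor([0.7])       # in-box gt area / full gt area
    gt_full_areas = gt_in_box_areas / (area_ratios + 1e-7)
    mask_iou_targets = overlap_areas / (
        mask_pred_areas + gt_full_areas - overlap_areas)
    assert 0. < mask_iou_targets.item() < 1.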
| 11,667 | 40.971223 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.layers import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fused_semantic_head import FusedSemanticHead
@MODELS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.conv_to_res = conv_to_res
if self.conv_to_res:
num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
| 988 | 33.103448 | 72 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/feature_relay_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig
@MODELS.register_module()
class FeatureRelayHead(BaseModule):
"""Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
        in_channels (int): number of input channels. Defaults to 1024.
        out_conv_channels (int): number of output channels before
            classification layer. Defaults to 256.
roi_feat_size (int): roi feat size at box head. Default: 7.
scale_factor (int): scale factor to match roi feat size
at mask head. Defaults to 2.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`]): Initialization config dict. Defaults to
dict(type='Kaiming', layer='Linear').
"""
def __init__(
self,
in_channels: int = 1024,
out_conv_channels: int = 256,
roi_feat_size: int = 7,
scale_factor: int = 2,
init_cfg: MultiConfig = dict(type='Kaiming', layer='Linear')
) -> None:
super().__init__(init_cfg=init_cfg)
assert isinstance(roi_feat_size, int)
self.in_channels = in_channels
self.out_conv_channels = out_conv_channels
self.roi_feat_size = roi_feat_size
self.out_channels = (roi_feat_size**2) * out_conv_channels
self.scale_factor = scale_factor
self.fp16_enabled = False
self.fc = nn.Linear(self.in_channels, self.out_channels)
self.upsample = nn.Upsample(
scale_factor=scale_factor, mode='bilinear', align_corners=True)
def forward(self, x: Tensor) -> Optional[Tensor]:
"""Forward function.
Args:
x (Tensor): Input feature.
Returns:
Optional[Tensor]: Output feature. When the first dim of input is
0, None is returned.
"""
N, _ = x.shape
if N > 0:
out_C = self.out_conv_channels
out_HW = self.roi_feat_size
x = self.fc(x)
x = x.reshape(N, out_C, out_HW, out_HW)
x = self.upsample(x)
return x
return None
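# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Stand-alone version of the relay transform above with the default sizes:
# a 1024-d box-head feature is projected to 256*7*7, reshaped into a spatial
# map and upsampled 2x so it matches 14x14 mask-head RoI features.
if __name__ == '__main__':
    import torch
    import torch.nn as nn
    N, in_channels, out_c, roi_size, scale = 4, 1024, 256, 7, 2
    x = torch.rand(N, in_channels)
    x = nn.Linear(in_channels, out_c * roi_size * roi_size)(x)
    x = x.reshape(N, out_c, roi_size, roi_size)
    x = nn.Upsample(scale_factor=scale, mode='bilinear', align_corners=True)(x)
    assert x.shape == (N, 256, 14, 14)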
| 2,295 | 32.275362 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/global_context_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.models.layers import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptConfigType
@MODELS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
        num_convs (int, optional): number of convolutional layers in
            GlbCtxHead. Defaults to 4.
Defaults to 4.
in_channels (int, optional): number of input channels. Defaults to 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Defaults to 256.
num_classes (int, optional): number of classes. Defaults to 80.
loss_weight (float, optional): global context loss weight.
Defaults to 1.
conv_cfg (dict, optional): config to init conv layer. Defaults to None.
norm_cfg (dict, optional): config to init norm layer. Defaults to None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection.
Defaults to False.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`]): Initialization config dict. Defaults to
dict(type='Normal', std=0.01, override=dict(name='fc')).
"""
def __init__(
self,
num_convs: int = 4,
in_channels: int = 256,
conv_out_channels: int = 256,
num_classes: int = 80,
loss_weight: float = 1.0,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
conv_to_res: bool = False,
init_cfg: MultiConfig = dict(
type='Normal', std=0.01, override=dict(name='fc'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
feats (Tuple[Tensor]): Multi-scale feature maps.
Returns:
Tuple[Tensor]:
- mc_pred (Tensor): Multi-class prediction.
- x (Tensor): Global context feature.
"""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
def loss(self, pred: Tensor, labels: List[Tensor]) -> Tensor:
"""Loss function.
Args:
pred (Tensor): Logits.
            labels (list[Tensor]): Ground truths.
Returns:
Tensor: Loss.
"""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
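# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Stand-alone version of the multi-label target construction in loss():
# each image is labelled with the set of classes present in its gt, and the
# prediction is supervised with BCEWithLogitsLoss. Sizes are made up.
if __name__ == '__main__':
    import torch
    import torch.nn as nn
    num_classes, loss_weight = 80, 1.0
    pred = torch.randn(2, num_classes)                      # per-image logits
    labels = [torch.tensor([3, 3, 17]), torch.tensor([0])]  # gt labels per img
    targets = pred.new_zeros(pred.size())
    for i, label in enumerate(labels):
        targets[i, label.unique()] = 1.0
    loss = loss_weight * nn.BCEWithLogitsLoss()(pred, targets)
    assert loss.dim() == 0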
| 4,458 | 33.835938 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coarse_mask_head import CoarseMaskHead
from .dynamic_mask_head import DynamicMaskHead
from .fcn_mask_head import FCNMaskHead
from .feature_relay_head import FeatureRelayHead
from .fused_semantic_head import FusedSemanticHead
from .global_context_head import GlobalContextHead
from .grid_head import GridHead
from .htc_mask_head import HTCMaskHead
from .mask_point_head import MaskPointHead
from .maskiou_head import MaskIoUHead
from .scnet_mask_head import SCNetMaskHead
from .scnet_semantic_head import SCNetSemanticHead
__all__ = [
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',
'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead',
'DynamicMaskHead'
]
| 817 | 37.952381 | 70 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/htc_mask_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Union
from mmcv.cnn import ConvModule
from torch import Tensor
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class HTCMaskHead(FCNMaskHead):
"""Mask head for HTC.
Args:
        with_conv_res (bool): Whether to add a conv layer for ``res_feat``.
Defaults to True.
"""
def __init__(self, with_conv_res: bool = True, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.with_conv_res = with_conv_res
if self.with_conv_res:
self.conv_res = ConvModule(
self.conv_out_channels,
self.conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self,
x: Tensor,
res_feat: Optional[Tensor] = None,
return_logits: bool = True,
return_feat: bool = True) -> Union[Tensor, List[Tensor]]:
"""
Args:
x (Tensor): Feature map.
res_feat (Tensor, optional): Feature for residual connection.
Defaults to None.
return_logits (bool): Whether return mask logits. Defaults to True.
return_feat (bool): Whether return feature map. Defaults to True.
Returns:
Union[Tensor, List[Tensor]]: The return result is one of three
results: res_feat, logits, or [logits, res_feat].
"""
assert not (not return_logits and not return_feat)
if res_feat is not None:
assert self.with_conv_res
res_feat = self.conv_res(res_feat)
x = x + res_feat
for conv in self.convs:
x = conv(x)
res_feat = x
outs = []
if return_logits:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_preds = self.conv_logits(x)
outs.append(mask_preds)
if return_feat:
outs.append(res_feat)
return outs if len(outs) > 1 else outs[0]
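# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Stand-alone view of the HTC information flow above: the mask feature from
# the previous cascade stage is projected by a 1x1 conv and added to the
# current stage's RoI features before the mask convs. Sizes are made up.
if __name__ == '__main__':
    import torch
    import torch.nn as nn
    num_rois, channels, roi_size = 2, 256, 14
    x = torch.rand(num_rois, channels, roi_size, roi_size)
    res_feat = torch.rand(num_rois, channels, roi_size, roi_size)
    x = x + nn.Conv2d(channels, channels, 1)(res_feat)
    assert x.shape == (num_rois, channels, roi_size, roi_size)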
| 2,190 | 32.19697 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmengine.config import ConfigDict
from mmengine.model import BaseModule, ModuleList
from mmengine.structures import InstanceData
from torch import Tensor
from torch.nn.modules.utils import _pair
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.models.utils import empty_instances
from mmdet.registry import MODELS
from mmdet.structures.mask import mask_target
from mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig
BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit
@MODELS.register_module()
class FCNMaskHead(BaseModule):
def __init__(self,
num_convs: int = 4,
roi_feat_size: int = 14,
in_channels: int = 256,
conv_kernel_size: int = 3,
conv_out_channels: int = 256,
num_classes: int = 80,
                 class_agnostic: bool = False,
upsample_cfg: ConfigType = dict(
type='deconv', scale_factor=2),
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
predictor_cfg: ConfigType = dict(type='Conv'),
loss_mask: ConfigType = dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super().__init__(init_cfg=init_cfg)
self.upsample_cfg = upsample_cfg.copy()
if self.upsample_cfg['type'] not in [
None, 'deconv', 'nearest', 'bilinear', 'carafe'
]:
raise ValueError(
f'Invalid upsample method {self.upsample_cfg["type"]}, '
'accepted methods are "deconv", "nearest", "bilinear", '
'"carafe"')
self.num_convs = num_convs
# WARN: roi_feat_size is reserved and not used
self.roi_feat_size = _pair(roi_feat_size)
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = self.upsample_cfg.get('type')
self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.predictor_cfg = predictor_cfg
self.loss_mask = MODELS.build(loss_mask)
self.convs = ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
upsample_cfg_.update(
in_channels=upsample_in_channels,
out_channels=self.conv_out_channels,
kernel_size=self.scale_factor,
stride=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
elif self.upsample_method == 'carafe':
upsample_cfg_.update(
channels=upsample_in_channels, scale_factor=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
else:
# suppress warnings
align_corners = (None
if self.upsample_method == 'nearest' else False)
upsample_cfg_.update(
scale_factor=self.scale_factor,
mode=self.upsample_method,
align_corners=align_corners)
self.upsample = build_upsample_layer(upsample_cfg_)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = build_conv_layer(self.predictor_cfg,
logits_in_channel, out_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
def init_weights(self) -> None:
"""Initialize the weights."""
super().init_weights()
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
elif isinstance(m, CARAFEPack):
m.init_weights()
elif hasattr(m, 'weight') and hasattr(m, 'bias'):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
def forward(self, x: Tensor) -> Tensor:
"""Forward features from the upstream network.
Args:
x (Tensor): Extract mask RoI features.
Returns:
Tensor: Predicted foreground masks.
"""
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_preds = self.conv_logits(x)
return mask_preds
def get_targets(self, sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList,
rcnn_train_cfg: ConfigDict) -> Tensor:
"""Calculate the ground truth for all samples in a batch according to
the sampling_results.
Args:
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
Returns:
Tensor: Mask target of each positive proposals in the image.
"""
pos_proposals = [res.pos_priors for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
gt_masks = [res.masks for res in batch_gt_instances]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
def loss_and_target(self, mask_preds: Tensor,
sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList,
rcnn_train_cfg: ConfigDict) -> dict:
"""Calculate the loss based on the features extracted by the mask head.
Args:
mask_preds (Tensor): Predicted foreground masks, has shape
(num_pos, num_classes, h, w).
sampling_results (List[obj:SamplingResult]): Assign results of
all images in a batch after sampling.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
Returns:
dict: A dictionary of loss and targets components.
"""
mask_targets = self.get_targets(
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
rcnn_train_cfg=rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss = dict()
if mask_preds.size(0) == 0:
loss_mask = mask_preds.sum()
else:
if self.class_agnostic:
loss_mask = self.loss_mask(mask_preds, mask_targets,
torch.zeros_like(pos_labels))
else:
loss_mask = self.loss_mask(mask_preds, mask_targets,
pos_labels)
loss['loss_mask'] = loss_mask
# TODO: which algorithm requires mask_targets?
return dict(loss_mask=loss, mask_targets=mask_targets)
def predict_by_feat(self,
mask_preds: Tuple[Tensor],
results_list: List[InstanceData],
batch_img_metas: List[dict],
rcnn_test_cfg: ConfigDict,
rescale: bool = False,
activate_map: bool = False) -> InstanceList:
"""Transform a batch of output features extracted from the head into
mask results.
Args:
mask_preds (tuple[Tensor]): Tuple of predicted foreground masks,
each has shape (n, num_classes, h, w).
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
batch_img_metas (list[dict]): List of image information.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
            activate_map (bool): Whether the results come from test-time
                augmentation. If True, the `mask_preds` will not be
                processed with sigmoid. Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process. Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert len(mask_preds) == len(results_list) == len(batch_img_metas)
for img_id in range(len(batch_img_metas)):
img_meta = batch_img_metas[img_id]
results = results_list[img_id]
bboxes = results.bboxes
if bboxes.shape[0] == 0:
results_list[img_id] = empty_instances(
[img_meta],
bboxes.device,
task_type='mask',
instance_results=[results],
mask_thr_binary=rcnn_test_cfg.mask_thr_binary)[0]
else:
im_mask = self._predict_by_feat_single(
mask_preds=mask_preds[img_id],
bboxes=bboxes,
labels=results.labels,
img_meta=img_meta,
rcnn_test_cfg=rcnn_test_cfg,
rescale=rescale,
activate_map=activate_map)
results.masks = im_mask
return results_list
def _predict_by_feat_single(self,
mask_preds: Tensor,
bboxes: Tensor,
labels: Tensor,
img_meta: dict,
rcnn_test_cfg: ConfigDict,
rescale: bool = False,
activate_map: bool = False) -> Tensor:
"""Get segmentation masks from mask_preds and bboxes.
Args:
mask_preds (Tensor): Predicted foreground masks, has shape
(n, num_classes, h, w).
bboxes (Tensor): Predicted bboxes, has shape (n, 4)
labels (Tensor): Labels of bboxes, has shape (n, )
img_meta (dict): image information.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
            activate_map (bool): Whether to get results with test-time
                augmentations. If True, the `mask_preds` will not be
                processed with sigmoid.
Defaults to False.
Returns:
            Tensor: Encoded masks, has shape (n, img_h, img_w)
Example:
>>> from mmengine.config import Config
>>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
>>> N = 7 # N = number of extracted ROIs
>>> C, H, W = 11, 32, 32
>>> # Create example instance of FCN Mask Head.
>>> self = FCNMaskHead(num_classes=C, num_convs=0)
>>> inputs = torch.rand(N, self.in_channels, H, W)
>>> mask_preds = self.forward(inputs)
>>> # Each input is associated with some bounding box
>>> bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
>>> labels = torch.randint(0, C, size=(N,))
>>> rcnn_test_cfg = Config({'mask_thr_binary': 0, })
>>> ori_shape = (H * 4, W * 4)
>>> scale_factor = (1, 1)
>>> rescale = False
>>> img_meta = {'scale_factor': scale_factor,
... 'ori_shape': ori_shape}
>>> # Encoded masks are a list for each category.
            >>> encoded_masks = self._predict_by_feat_single(
... mask_preds, bboxes, labels,
... img_meta, rcnn_test_cfg, rescale)
>>> assert encoded_masks.size()[0] == N
>>> assert encoded_masks.size()[1:] == ori_shape
"""
scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat(
(1, 2))
img_h, img_w = img_meta['ori_shape'][:2]
device = bboxes.device
if not activate_map:
mask_preds = mask_preds.sigmoid()
else:
# In AugTest, has been activated before
mask_preds = bboxes.new_tensor(mask_preds)
if rescale: # in-placed rescale the bboxes
bboxes /= scale_factor
else:
w_scale, h_scale = scale_factor[0, 0], scale_factor[0, 1]
img_h = np.round(img_h * h_scale.item()).astype(np.int32)
img_w = np.round(img_w * w_scale.item()).astype(np.int32)
N = len(mask_preds)
# The actual implementation split the input into chunks,
# and paste them chunk by chunk.
if device.type == 'cpu':
            # CPU is most efficient when masks are pasted one by one with
            # skip_empty=True, so that it performs the minimal number of
            # operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks,
# but may have memory issue
# the types of img_w and img_h are np.int32,
# when the image resolution is large,
# the calculation of num_chunks will overflow.
# so we need to change the types of img_w and img_h to int.
# See https://github.com/open-mmlab/mmdetection/pull/5191
num_chunks = int(
np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT /
GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
threshold = rcnn_test_cfg.mask_thr_binary
im_mask = torch.zeros(
N,
img_h,
img_w,
device=device,
dtype=torch.bool if threshold >= 0 else torch.uint8)
if not self.class_agnostic:
mask_preds = mask_preds[range(N), labels][:, None]
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
mask_preds[inds],
bboxes[inds],
img_h,
img_w,
skip_empty=device.type == 'cpu')
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
im_mask[(inds, ) + spatial_inds] = masks_chunk
return im_mask
def _do_paste_mask(masks: Tensor,
boxes: Tensor,
img_h: int,
img_w: int,
skip_empty: bool = True) -> tuple:
"""Paste instance masks according to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
boxes (Tensor): N, 4
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
skip_empty (bool): Only paste masks within the region that
tightly bound all boxes, and returns the results this region only.
An important optimization for CPU.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
If skip_empty == False, the whole image will be pasted. It will
return a mask of shape (N, img_h, img_w) and an empty tuple.
If skip_empty == True, only area around the mask will be pasted.
A mask of shape (N, h', w') and its start and end coordinates
in the original image will be returned.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
if skip_empty:
x0_int, y0_int = torch.clamp(
boxes.min(dim=0).values.floor()[:2] - 1,
min=0).to(dtype=torch.int32)
x1_int = torch.clamp(
boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(
boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
N = masks.shape[0]
img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
# img_x, img_y have shapes (N, w), (N, h)
# IsInf op is not supported with ONNX<=1.7.0
if not torch.onnx.is_in_onnx_export():
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
if skip_empty:
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
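# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): pastes a single
# 28x28 mask prediction into a 64x64 image canvas with `_do_paste_mask`.
# Shapes follow the docstring above; the box coordinates are made up.
if __name__ == '__main__':
    dummy_masks = torch.rand(1, 1, 28, 28)            # (N, 1, H, W) mask probs
    dummy_boxes = torch.tensor([[8., 8., 40., 40.]])  # (N, 4) in image coords
    pasted, spatial_inds = _do_paste_mask(
        dummy_masks, dummy_boxes, img_h=64, img_w=64, skip_empty=False)
    # `pasted` has shape (N, 64, 64); threshold it the same way as
    # `_predict_by_feat_single` does to obtain a binary instance mask.
    binary_mask = pasted >= 0.5
    print(binary_mask.shape)  # torch.Size([1, 64, 64])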
| 20,127 | 41.374737 | 85 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptConfigType
@MODELS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(
self,
num_ins: int,
fusion_level: int,
seg_scale_factor=1 / 8,
num_convs: int = 4,
in_channels: int = 256,
conv_out_channels: int = 256,
num_classes: int = 183,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
ignore_label: int = None,
loss_weight: float = None,
loss_seg: ConfigDict = dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2),
init_cfg: MultiConfig = dict(
type='Kaiming', override=dict(name='conv_logits'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.seg_scale_factor = seg_scale_factor
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
            warnings.warn('``ignore_label`` and ``loss_weight`` will be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
self.criterion = MODELS.build(loss_seg)
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
feats (tuple[Tensor]): Multi scale feature maps.
Returns:
tuple[Tensor]:
- mask_preds (Tensor): Predicted mask logits.
- x (Tensor): Fused feature.
"""
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
# fix runtime error of "+=" inplace operation in PyTorch 1.10
x = x + self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_preds = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_preds, x
def loss(self, mask_preds: Tensor, labels: Tensor) -> Tensor:
"""Loss function.
Args:
mask_preds (Tensor): Predicted mask logits.
labels (Tensor): Ground truth.
Returns:
Tensor: Semantic segmentation loss.
"""
labels = F.interpolate(
labels.float(), scale_factor=self.seg_scale_factor, mode='nearest')
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_preds, labels)
return loss_semantic_seg
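# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): fuses five dummy
# FPN levels and produces semantic logits. Assumes mmcv/mmdet are importable
# and the model registry can build the default CrossEntropyLoss; the batch
# size, spatial sizes, and level count below are made-up example values.
if __name__ == '__main__':
    import torch
    head = FusedSemanticHead(num_ins=5, fusion_level=1, num_classes=183)
    feats = tuple(
        torch.rand(2, 256, 64 // 2**i, 64 // 2**i) for i in range(5))
    mask_preds, fused_feat = head(feats)
    print(mask_preds.shape)  # (2, 183, 32, 32) -- spatial size of level 1
    print(fused_feat.shape)  # (2, 256, 32, 32)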
| 4,978 | 33.337931 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/mask_point_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
from typing import List, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.models.utils import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from mmdet.registry import MODELS
from mmdet.structures.bbox import bbox2roi
from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType
@MODELS.register_module()
class MaskPointHead(BaseModule):
"""A mask point head use in PointRend.
    ``MaskPointHead`` uses a shared multi-layer perceptron (equivalent to
    nn.Conv1d) to predict the logits of input points. The fine-grained feature
    and coarse feature are concatenated together for prediction.
Args:
num_fcs (int): Number of fc layers in the head. Defaults to 3.
in_channels (int): Number of input channels. Defaults to 256.
fc_channels (int): Number of fc channels. Defaults to 256.
num_classes (int): Number of classes for logits. Defaults to 80.
        class_agnostic (bool): Whether to use class-agnostic classification.
            If so, the output channels of logits will be 1. Defaults to False.
        coarse_pred_each_layer (bool): Whether to concatenate the coarse
            feature with the output of each fc layer. Defaults to True.
conv_cfg (:obj:`ConfigDict` or dict): Dictionary to construct
and config conv layer. Defaults to dict(type='Conv1d')).
norm_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to construct
and config norm layer. Defaults to None.
loss_point (:obj:`ConfigDict` or dict): Dictionary to construct and
config loss layer of point head. Defaults to
dict(type='CrossEntropyLoss', use_mask=True, loss_weight=1.0).
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict.
"""
def __init__(
self,
num_classes: int,
num_fcs: int = 3,
in_channels: int = 256,
fc_channels: int = 256,
class_agnostic: bool = False,
coarse_pred_each_layer: bool = True,
conv_cfg: ConfigType = dict(type='Conv1d'),
norm_cfg: OptConfigType = None,
act_cfg: ConfigType = dict(type='ReLU'),
loss_point: ConfigType = dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
init_cfg: MultiConfig = dict(
type='Normal', std=0.001, override=dict(name='fc_logits'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_fcs = num_fcs
self.in_channels = in_channels
self.fc_channels = fc_channels
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.coarse_pred_each_layer = coarse_pred_each_layer
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.loss_point = MODELS.build(loss_point)
fc_in_channels = in_channels + num_classes
self.fcs = nn.ModuleList()
for _ in range(num_fcs):
fc = ConvModule(
fc_in_channels,
fc_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.fcs.append(fc)
fc_in_channels = fc_channels
fc_in_channels += num_classes if self.coarse_pred_each_layer else 0
out_channels = 1 if self.class_agnostic else self.num_classes
self.fc_logits = nn.Conv1d(
fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, fine_grained_feats: Tensor,
coarse_feats: Tensor) -> Tensor:
"""Classify each point base on fine grained and coarse feats.
Args:
fine_grained_feats (Tensor): Fine grained feature sampled from FPN,
shape (num_rois, in_channels, num_points).
coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,
shape (num_rois, num_classes, num_points).
Returns:
Tensor: Point classification results,
shape (num_rois, num_class, num_points).
"""
x = torch.cat([fine_grained_feats, coarse_feats], dim=1)
for fc in self.fcs:
x = fc(x)
if self.coarse_pred_each_layer:
x = torch.cat((x, coarse_feats), dim=1)
return self.fc_logits(x)
def get_targets(self, rois: Tensor, rel_roi_points: Tensor,
sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList,
cfg: ConfigType) -> Tensor:
"""Get training targets of MaskPointHead for all images.
Args:
rois (Tensor): Region of Interest, shape (num_rois, 5).
rel_roi_points (Tensor): Points coordinates relative to RoI, shape
(num_rois, num_points, 2).
sampling_results (:obj:`SamplingResult`): Sampling result after
sampling and assignment.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
cfg (obj:`ConfigDict` or dict): Training cfg.
Returns:
Tensor: Point target, shape (num_rois, num_points).
"""
num_imgs = len(sampling_results)
rois_list = []
rel_roi_points_list = []
for batch_ind in range(num_imgs):
inds = (rois[:, 0] == batch_ind)
rois_list.append(rois[inds])
rel_roi_points_list.append(rel_roi_points[inds])
pos_assigned_gt_inds_list = [
res.pos_assigned_gt_inds for res in sampling_results
]
cfg_list = [cfg for _ in range(num_imgs)]
point_targets = map(self._get_targets_single, rois_list,
rel_roi_points_list, pos_assigned_gt_inds_list,
batch_gt_instances, cfg_list)
point_targets = list(point_targets)
if len(point_targets) > 0:
point_targets = torch.cat(point_targets)
return point_targets
def _get_targets_single(self, rois: Tensor, rel_roi_points: Tensor,
pos_assigned_gt_inds: Tensor,
gt_instances: InstanceData,
cfg: ConfigType) -> Tensor:
"""Get training target of MaskPointHead for each image."""
num_pos = rois.size(0)
num_points = cfg.num_points
if num_pos > 0:
gt_masks_th = (
gt_instances.masks.to_tensor(rois.dtype,
rois.device).index_select(
0, pos_assigned_gt_inds))
gt_masks_th = gt_masks_th.unsqueeze(1)
rel_img_points = rel_roi_point_to_rel_img_point(
rois, rel_roi_points, gt_masks_th)
point_targets = point_sample(gt_masks_th,
rel_img_points).squeeze(1)
else:
point_targets = rois.new_zeros((0, num_points))
return point_targets
def loss_and_target(self, point_pred: Tensor, rel_roi_points: Tensor,
sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList,
cfg: ConfigType) -> dict:
"""Calculate loss for MaskPointHead.
Args:
            point_pred (Tensor): Point prediction result, shape
(num_rois, num_classes, num_points).
rel_roi_points (Tensor): Points coordinates relative to RoI, shape
(num_rois, num_points, 2).
sampling_results (:obj:`SamplingResult`): Sampling result after
sampling and assignment.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
cfg (obj:`ConfigDict` or dict): Training cfg.
Returns:
dict: a dictionary of point loss and point target.
"""
rois = bbox2roi([res.pos_bboxes for res in sampling_results])
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
point_target = self.get_targets(rois, rel_roi_points, sampling_results,
batch_gt_instances, cfg)
if self.class_agnostic:
loss_point = self.loss_point(point_pred, point_target,
torch.zeros_like(pos_labels))
else:
loss_point = self.loss_point(point_pred, point_target, pos_labels)
return dict(loss_point=loss_point, point_target=point_target)
def get_roi_rel_points_train(self, mask_preds: Tensor, labels: Tensor,
cfg: ConfigType) -> Tensor:
"""Get ``num_points`` most uncertain points with random points during
train.
Sample points in [0, 1] x [0, 1] coordinate space based on their
uncertainty. The uncertainties are calculated for each point using
'_get_uncertainty()' function that takes point's logit prediction as
input.
Args:
mask_preds (Tensor): A tensor of shape (num_rois, num_classes,
mask_height, mask_width) for class-specific or class-agnostic
prediction.
labels (Tensor): The ground truth class for each instance.
cfg (:obj:`ConfigDict` or dict): Training config of point head.
Returns:
point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
                that contains the coordinates of the sampled points.
"""
point_coords = get_uncertain_point_coords_with_randomness(
mask_preds, labels, cfg.num_points, cfg.oversample_ratio,
cfg.importance_sample_ratio)
return point_coords
def get_roi_rel_points_test(self, mask_preds: Tensor, label_preds: Tensor,
cfg: ConfigType) -> Tuple[Tensor, Tensor]:
"""Get ``num_points`` most uncertain points during test.
Args:
mask_preds (Tensor): A tensor of shape (num_rois, num_classes,
mask_height, mask_width) for class-specific or class-agnostic
prediction.
            label_preds (Tensor): The predicted class for each instance.
cfg (:obj:`ConfigDict` or dict): Testing config of point head.
Returns:
tuple:
- point_indices (Tensor): A tensor of shape (num_rois, num_points)
that contains indices from [0, mask_height x mask_width) of the
most uncertain points.
- point_coords (Tensor): A tensor of shape (num_rois, num_points,
2) that contains [0, 1] x [0, 1] normalized coordinates of the
most uncertain points from the [mask_height, mask_width] grid.
"""
num_points = cfg.subdivision_num_points
uncertainty_map = get_uncertainty(mask_preds, label_preds)
num_rois, _, mask_height, mask_width = uncertainty_map.shape
        # During ONNX exporting, the type of each element of 'shape' is
# `Tensor(float)`, while it is `float` during PyTorch inference.
if isinstance(mask_height, torch.Tensor):
h_step = 1.0 / mask_height.float()
w_step = 1.0 / mask_width.float()
else:
h_step = 1.0 / mask_height
w_step = 1.0 / mask_width
# cast to int to avoid dynamic K for TopK op in ONNX
mask_size = int(mask_height * mask_width)
uncertainty_map = uncertainty_map.view(num_rois, mask_size)
num_points = min(mask_size, num_points)
point_indices = uncertainty_map.topk(num_points, dim=1)[1]
xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step
ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step
point_coords = torch.stack([xs, ys], dim=2)
return point_indices, point_coords
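# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the index-to-coordinate
# conversion used in `get_roi_rel_points_test`. For a mask grid of size
# (mask_height, mask_width), a flat index maps to its cell-center coordinate
# in [0, 1] x [0, 1] space. Values below are toy numbers.
if __name__ == '__main__':
    mask_height, mask_width = 4, 4
    h_step, w_step = 1.0 / mask_height, 1.0 / mask_width
    point_indices = torch.tensor([[0, 5, 15]])  # (num_rois, num_points)
    xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step
    ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step
    point_coords = torch.stack([xs, ys], dim=2)
    print(point_coords[0].tolist())
    # [[0.125, 0.125], [0.375, 0.375], [0.875, 0.875]]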
| 12,664 | 43.438596 | 126 |
py
|
ERD
|
ERD-main/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.layers import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class SCNetMaskHead(FCNMaskHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.conv_to_res = conv_to_res
if conv_to_res:
assert self.conv_kernel_size == 3
self.num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
self.num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
| 973 | 32.586207 | 72 |
py
|
ERD
|
ERD-main/mmdet/models/losses/ghm_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
from .utils import weight_reduce_loss
def _expand_onehot_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(
(labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
# TODO: code refactoring to make it consistent with other losses
@MODELS.register_module()
class GHMC(nn.Module):
"""GHM Classification Loss.
Details of the theorem can be viewed in the paper
`Gradient Harmonized Single-stage Detector
<https://arxiv.org/abs/1811.05181>`_.
Args:
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
use_sigmoid (bool): Can only be true for BCE based loss now.
loss_weight (float): The weight of the total GHM-C loss.
reduction (str): Options are "none", "mean" and "sum".
Defaults to "mean"
"""
def __init__(self,
bins=10,
momentum=0,
use_sigmoid=True,
loss_weight=1.0,
reduction='mean'):
super(GHMC, self).__init__()
self.bins = bins
self.momentum = momentum
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] += 1e-6
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.use_sigmoid = use_sigmoid
if not self.use_sigmoid:
raise NotImplementedError
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self,
pred,
target,
label_weight,
reduction_override=None,
**kwargs):
"""Calculate the GHM-C loss.
Args:
pred (float tensor of size [batch_num, class_num]):
The direct prediction of classification fc layer.
target (float tensor of size [batch_num, class_num]):
Binary class target for each sample.
label_weight (float tensor of size [batch_num, class_num]):
the value is 1 if the sample is valid and 0 if ignored.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
The gradient harmonized loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
# the target should be binary class label
if pred.dim() != target.dim():
target, label_weight = _expand_onehot_labels(
target, label_weight, pred.size(-1))
target, label_weight = target.float(), label_weight.float()
edges = self.edges
mmt = self.momentum
weights = torch.zeros_like(pred)
# gradient length
g = torch.abs(pred.sigmoid().detach() - target)
valid = label_weight > 0
tot = max(valid.float().sum().item(), 1.0)
n = 0 # n valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
n += 1
if n > 0:
weights = weights / n
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none')
loss = weight_reduce_loss(
loss, weights, reduction=reduction, avg_factor=tot)
return loss * self.loss_weight
# TODO: code refactoring to make it consistent with other losses
@MODELS.register_module()
class GHMR(nn.Module):
"""GHM Regression Loss.
Details of the theorem can be viewed in the paper
`Gradient Harmonized Single-stage Detector
<https://arxiv.org/abs/1811.05181>`_.
Args:
mu (float): The parameter for the Authentic Smooth L1 loss.
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
loss_weight (float): The weight of the total GHM-R loss.
reduction (str): Options are "none", "mean" and "sum".
Defaults to "mean"
"""
def __init__(self,
mu=0.02,
bins=10,
momentum=0,
loss_weight=1.0,
reduction='mean'):
super(GHMR, self).__init__()
self.mu = mu
self.bins = bins
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] = 1e3
self.momentum = momentum
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.loss_weight = loss_weight
self.reduction = reduction
# TODO: support reduction parameter
def forward(self,
pred,
target,
label_weight,
avg_factor=None,
reduction_override=None):
"""Calculate the GHM-R loss.
Args:
pred (float tensor of size [batch_num, 4 (* class_num)]):
The prediction of box regression layer. Channel number can be 4
or 4 * class_num depending on whether it is class-agnostic.
target (float tensor of size [batch_num, 4 (* class_num)]):
The target regression values with the same size of pred.
label_weight (float tensor of size [batch_num, 4 (* class_num)]):
The weight of each sample, 0 if ignored.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
The gradient harmonized loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
mu = self.mu
edges = self.edges
mmt = self.momentum
# ASL1 loss
diff = pred - target
loss = torch.sqrt(diff * diff + mu * mu) - mu
# gradient length
g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
weights = torch.zeros_like(g)
valid = label_weight > 0
tot = max(label_weight.float().sum().item(), 1.0)
n = 0 # n: valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
n += 1
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
if n > 0:
weights /= n
loss = weight_reduce_loss(
loss, weights, reduction=reduction, avg_factor=tot)
return loss * self.loss_weight
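# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): GHM-C on toy
# logits. Shapes follow the docstrings above; all numbers are random.
if __name__ == '__main__':
    loss_cls = GHMC(bins=10, momentum=0.75, use_sigmoid=True, loss_weight=1.0)
    pred = torch.randn(8, 4)                      # (batch_num, class_num) logits
    target = torch.randint(0, 2, (8, 4)).float()  # binary class targets
    label_weight = torch.ones(8, 4)               # all samples valid
    print(loss_cls(pred, target, label_weight))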
| 7,928 | 36.051402 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/mse_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred: Tensor, target: Tensor) -> Tensor:
"""A Wrapper of MSE loss.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
Returns:
Tensor: loss Tensor
"""
return F.mse_loss(pred, target, reduction='none')
@MODELS.register_module()
class MSELoss(nn.Module):
"""MSELoss.
Args:
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None) -> Tensor:
"""Forward function of loss.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
weight (Tensor, optional): Weight of the loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
Tensor: The calculated loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * mse_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss
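# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): MSELoss on toy
# tensors, using the default mean reduction.
if __name__ == '__main__':
    import torch
    criterion = MSELoss(reduction='mean', loss_weight=1.0)
    pred, target = torch.rand(4, 2), torch.rand(4, 2)
    print(criterion(pred, target))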
| 2,267 | 31.4 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/losses/dice_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmdet.registry import MODELS
from .utils import weight_reduce_loss
def dice_loss(pred,
target,
weight=None,
eps=1e-3,
reduction='mean',
naive_dice=False,
avg_factor=None):
"""Calculate dice loss, there are two forms of dice loss is supported:
- the one proposed in `V-Net: Fully Convolutional Neural
Networks for Volumetric Medical Image Segmentation
<https://arxiv.org/abs/1606.04797>`_.
- the dice loss in which the power of the number in the
denominator is the first power instead of the second
power.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *)
target (torch.Tensor): The learning label of the prediction,
            shape (n, *), same shape as pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
eps (float): Avoid dividing by zero. Default: 1e-3.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
Options are "none", "mean" and "sum".
        naive_dice (bool, optional): If False, use the dice
            loss defined in the V-Net paper; otherwise, use the
            naive dice loss in which the power of the number in the
            denominator is the first power instead of the second
            power. Defaults to False.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
input = pred.flatten(1)
target = target.flatten(1).float()
a = torch.sum(input * target, 1)
if naive_dice:
b = torch.sum(input, 1)
c = torch.sum(target, 1)
d = (2 * a + eps) / (b + c + eps)
else:
b = torch.sum(input * input, 1) + eps
c = torch.sum(target * target, 1) + eps
d = (2 * a) / (b + c)
loss = 1 - d
if weight is not None:
assert weight.ndim == loss.ndim
assert len(weight) == len(pred)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@MODELS.register_module()
class DiceLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
activate=True,
reduction='mean',
naive_dice=False,
loss_weight=1.0,
eps=1e-3):
"""Compute dice loss.
Args:
            use_sigmoid (bool, optional): Whether the prediction is
                activated with sigmoid or softmax. Defaults to True.
            activate (bool): Whether to activate the predictions inside
                the loss (apply sigmoid when ``use_sigmoid`` is True).
                If False, predictions are assumed to be activated already.
                Defaults to True.
reduction (str, optional): The method used
to reduce the loss. Options are "none",
"mean" and "sum". Defaults to 'mean'.
naive_dice (bool, optional): If false, use the dice
loss defined in the V-Net paper, otherwise, use the
naive dice loss in which the power of the number in the
denominator is the first power instead of the second
power. Defaults to False.
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
eps (float): Avoid dividing by zero. Defaults to 1e-3.
"""
super(DiceLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.reduction = reduction
self.naive_dice = naive_dice
self.loss_weight = loss_weight
self.eps = eps
self.activate = activate
def forward(self,
pred,
target,
weight=None,
reduction_override=None,
avg_factor=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *).
target (torch.Tensor): The label of the prediction,
                shape (n, *), same shape as pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.activate:
if self.use_sigmoid:
pred = pred.sigmoid()
else:
raise NotImplementedError
loss = self.loss_weight * dice_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
naive_dice=self.naive_dice,
avg_factor=avg_factor)
return loss
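# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): compares the V-Net
# style dice loss with the "naive" variant on a toy prediction/target pair.
if __name__ == '__main__':
    pred = torch.tensor([[0.9, 0.1, 0.8, 0.2]])
    target = torch.tensor([[1., 0., 1., 0.]])
    print(dice_loss(pred, target, naive_dice=False))  # squared denominator
    print(dice_loss(pred, target, naive_dice=True))   # first-power denominator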
| 5,329 | 35.258503 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/losses/pisa_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from mmdet.structures.bbox import bbox_overlaps
from ..task_modules.coders import BaseBBoxCoder
from ..task_modules.samplers import SamplingResult
def isr_p(cls_score: Tensor,
bbox_pred: Tensor,
bbox_targets: Tuple[Tensor],
rois: Tensor,
sampling_results: List[SamplingResult],
loss_cls: nn.Module,
bbox_coder: BaseBBoxCoder,
k: float = 2,
bias: float = 0,
num_class: int = 80) -> tuple:
"""Importance-based Sample Reweighting (ISR_P), positive part.
Args:
cls_score (Tensor): Predicted classification scores.
bbox_pred (Tensor): Predicted bbox deltas.
        bbox_targets (tuple[Tensor]): A tuple of bbox targets, which are
            labels, label_weights, bbox_targets and bbox_weights, respectively.
rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs
(two_stage) in shape (n, 5).
sampling_results (:obj:`SamplingResult`): Sampling results.
loss_cls (:obj:`nn.Module`): Classification loss func of the head.
bbox_coder (:obj:`BaseBBoxCoder`): BBox coder of the head.
k (float): Power of the non-linear mapping. Defaults to 2.
bias (float): Shift of the non-linear mapping. Defaults to 0.
num_class (int): Number of classes, defaults to 80.
Return:
tuple([Tensor]): labels, imp_based_label_weights, bbox_targets,
bbox_target_weights
"""
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
pos_label_inds = ((labels >= 0) &
(labels < num_class)).nonzero().reshape(-1)
pos_labels = labels[pos_label_inds]
# if no positive samples, return the original targets
num_pos = float(pos_label_inds.size(0))
if num_pos == 0:
return labels, label_weights, bbox_targets, bbox_weights
# merge pos_assigned_gt_inds of per image to a single tensor
gts = list()
last_max_gt = 0
for i in range(len(sampling_results)):
gt_i = sampling_results[i].pos_assigned_gt_inds
gts.append(gt_i + last_max_gt)
if len(gt_i) != 0:
last_max_gt = gt_i.max() + 1
gts = torch.cat(gts)
assert len(gts) == num_pos
cls_score = cls_score.detach()
bbox_pred = bbox_pred.detach()
# For single stage detectors, rois here indicate anchors, in shape (N, 4)
# For two stage detectors, rois are in shape (N, 5)
if rois.size(-1) == 5:
pos_rois = rois[pos_label_inds][:, 1:]
else:
pos_rois = rois[pos_label_inds]
if bbox_pred.size(-1) > 4:
bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4)
else:
pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4)
# compute iou of the predicted bbox and the corresponding GT
pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4)
pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred)
target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target)
ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True)
pos_imp_weights = label_weights[pos_label_inds]
# Two steps to compute IoU-HLR. Samples are first sorted by IoU locally,
# then sorted again within the same-rank group
max_l_num = pos_labels.bincount().max()
for label in pos_labels.unique():
l_inds = (pos_labels == label).nonzero().view(-1)
l_gts = gts[l_inds]
for t in l_gts.unique():
t_inds = l_inds[l_gts == t]
t_ious = ious[t_inds]
_, t_iou_rank_idx = t_ious.sort(descending=True)
_, t_iou_rank = t_iou_rank_idx.sort()
ious[t_inds] += max_l_num - t_iou_rank.float()
l_ious = ious[l_inds]
_, l_iou_rank_idx = l_ious.sort(descending=True)
_, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR
# linearly map HLR to label weights
pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num
pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k)
# normalize to make the new weighted loss value equal to the original loss
pos_loss_cls = loss_cls(
cls_score[pos_label_inds], pos_labels, reduction_override='none')
if pos_loss_cls.dim() > 1:
ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:,
None]
new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None]
else:
ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds]
new_pos_loss_cls = pos_loss_cls * pos_imp_weights
pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum()
pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio
label_weights[pos_label_inds] = pos_imp_weights
bbox_targets = labels, label_weights, bbox_targets, bbox_weights
return bbox_targets
def carl_loss(cls_score: Tensor,
labels: Tensor,
bbox_pred: Tensor,
bbox_targets: Tensor,
loss_bbox: nn.Module,
k: float = 1,
bias: float = 0.2,
avg_factor: Optional[int] = None,
sigmoid: bool = False,
num_class: int = 80) -> dict:
"""Classification-Aware Regression Loss (CARL).
Args:
cls_score (Tensor): Predicted classification scores.
labels (Tensor): Targets of classification.
bbox_pred (Tensor): Predicted bbox deltas.
bbox_targets (Tensor): Target of bbox regression.
        loss_bbox (func): Regression loss func of the head.
k (float): Power of the non-linear mapping. Defaults to 1.
bias (float): Shift of the non-linear mapping. Defaults to 0.2.
avg_factor (int, optional): Average factor used in regression loss.
sigmoid (bool): Activation of the classification score.
num_class (int): Number of classes, defaults to 80.
Return:
dict: CARL loss dict.
"""
pos_label_inds = ((labels >= 0) &
(labels < num_class)).nonzero().reshape(-1)
if pos_label_inds.numel() == 0:
return dict(loss_carl=cls_score.sum()[None] * 0.)
pos_labels = labels[pos_label_inds]
# multiply pos_cls_score with the corresponding bbox weight
# and remain gradient
if sigmoid:
pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels]
else:
pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels]
carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)
# normalize carl_loss_weight to make its sum equal to num positive
num_pos = float(pos_cls_score.size(0))
weight_ratio = num_pos / carl_loss_weights.sum()
carl_loss_weights *= weight_ratio
if avg_factor is None:
avg_factor = bbox_targets.size(0)
# if is class agnostic, bbox pred is in shape (N, 4)
# otherwise, bbox pred is in shape (N, #classes, 4)
if bbox_pred.size(-1) > 4:
bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels]
else:
pos_bbox_preds = bbox_pred[pos_label_inds]
ori_loss_reg = loss_bbox(
pos_bbox_preds,
bbox_targets[pos_label_inds],
reduction_override='none') / avg_factor
loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum()
return dict(loss_carl=loss_carl[None])
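# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the classification-aware
# weight used by `carl_loss`, w_i = (bias + (1 - bias) * s_i) ** k, where s_i
# is the classification score of the assigned class. Numbers below are toy.
if __name__ == '__main__':
    pos_cls_score = torch.tensor([0.9, 0.5, 0.1])
    k, bias = 1.0, 0.2
    carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)
    # normalize so that the weights sum to the number of positive samples,
    # mirroring the normalization inside `carl_loss`
    carl_loss_weights *= len(pos_cls_score) / carl_loss_weights.sum()
    print(carl_loss_weights)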
| 7,670 | 39.803191 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/balanced_l1_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def balanced_l1_loss(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='mean'):
"""Calculate balanced L1 loss.
Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
beta (float): The loss is a piecewise function of prediction and target
and ``beta`` serves as a threshold for the difference between the
prediction and target. Defaults to 1.0.
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss.
Defaults to 1.5.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
return loss
@MODELS.register_module()
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Args:
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
beta (float, optional): The loss is a piecewise function of prediction
and target. ``beta`` serves as a threshold for the difference
between the prediction and target. Defaults to 1.0.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self,
alpha=0.5,
gamma=1.5,
beta=1.0,
reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
weight (torch.Tensor, optional): Sample-wise loss weight with
shape (N, ).
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
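# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): the balanced L1
# loss on toy box deltas, via the functional and module forms defined above.
if __name__ == '__main__':
    pred = torch.rand(5, 4)
    target = torch.rand(5, 4)
    print(balanced_l1_loss(pred, target))          # functional form
    print(BalancedL1Loss(beta=1.0)(pred, target))  # module form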
| 4,205 | 33.195122 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/iou_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures.bbox import bbox_overlaps
from .utils import weighted_loss
@weighted_loss
def iou_loss(pred: Tensor,
target: Tensor,
linear: bool = False,
mode: str = 'log',
eps: float = 1e-6) -> Tensor:
"""IoU loss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
The loss is calculated as negative log of IoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
linear (bool, optional): If True, use linear scale of loss instead of
log scale. Default: False.
mode (str): Loss scaling mode, including "linear", "square", and "log".
Default: 'log'
eps (float): Epsilon to avoid log(0).
Return:
Tensor: Loss tensor.
"""
assert mode in ['linear', 'square', 'log']
if linear:
mode = 'linear'
warnings.warn('DeprecationWarning: Setting "linear=True" in '
'iou_loss is deprecated, please use "mode=`linear`" '
'instead.')
ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
if mode == 'linear':
loss = 1 - ious
elif mode == 'square':
loss = 1 - ious**2
elif mode == 'log':
loss = -ious.log()
else:
raise NotImplementedError
return loss
@weighted_loss
def bounded_iou_loss(pred: Tensor,
target: Tensor,
beta: float = 0.2,
eps: float = 1e-3) -> Tensor:
"""BIoULoss.
This is an implementation of paper
`Improving Object Localization with Fitness NMS and Bounded IoU Loss.
<https://arxiv.org/abs/1711.00164>`_.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
beta (float, optional): Beta parameter in smoothl1.
eps (float, optional): Epsilon to avoid NaN values.
Return:
Tensor: Loss tensor.
"""
pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
pred_w = pred[:, 2] - pred[:, 0]
pred_h = pred[:, 3] - pred[:, 1]
with torch.no_grad():
target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
target_ctry = (target[:, 1] + target[:, 3]) * 0.5
target_w = target[:, 2] - target[:, 0]
target_h = target[:, 3] - target[:, 1]
dx = target_ctrx - pred_ctrx
dy = target_ctry - pred_ctry
loss_dx = 1 - torch.max(
(target_w - 2 * dx.abs()) /
(target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
loss_dy = 1 - torch.max(
(target_h - 2 * dy.abs()) /
(target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
(target_w + eps))
loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
(target_h + eps))
# view(..., -1) does not work for empty tensor
loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
dim=-1).flatten(1)
loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
loss_comb - 0.5 * beta)
return loss
@weighted_loss
def giou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:
r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
Box Regression <https://arxiv.org/abs/1902.09630>`_.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Epsilon to avoid log(0).
Return:
Tensor: Loss tensor.
"""
gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
loss = 1 - gious
return loss
@weighted_loss
def diou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:
r"""Implementation of `Distance-IoU Loss: Faster and Better
Learning for Bounding Box Regression https://arxiv.org/abs/1911.08287`_.
Code is modified from https://github.com/Zzh-tju/DIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Epsilon to avoid log(0).
Return:
Tensor: Loss tensor.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw**2 + ch**2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
rho2 = left + right
# DIoU
dious = ious - rho2 / c2
loss = 1 - dious
return loss
@weighted_loss
def ciou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:
r"""`Implementation of paper `Enhancing Geometric Factors into
Model Learning and Inference for Object Detection and Instance
Segmentation <https://arxiv.org/abs/2005.03572>`_.
Code is modified from https://github.com/Zzh-tju/CIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Epsilon to avoid log(0).
Return:
Tensor: Loss tensor.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw**2 + ch**2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
rho2 = left + right
factor = 4 / math.pi**2
v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = (ious > 0.5).float() * v / (1 - ious + v)
# CIoU
cious = ious - (rho2 / c2 + alpha * v)
loss = 1 - cious.clamp(min=-1.0, max=1.0)
return loss
@weighted_loss
def eiou_loss(pred: Tensor,
target: Tensor,
smooth_point: float = 0.1,
eps: float = 1e-7) -> Tensor:
r"""Implementation of paper `Extended-IoU Loss: A Systematic
IoU-Related Method: Beyond Simplified Regression for Better
Localization <https://ieeexplore.ieee.org/abstract/document/9429909>`_
Code is modified from https://github.com//ShiqiYu/libfacedetection.train.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
smooth_point (float): hyperparameter, default is 0.1.
eps (float): Epsilon to avoid log(0).
Return:
Tensor: Loss tensor.
"""
px1, py1, px2, py2 = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
tx1, ty1, tx2, ty2 = target[:, 0], target[:, 1], target[:, 2], target[:, 3]
# extent top left
ex1 = torch.min(px1, tx1)
ey1 = torch.min(py1, ty1)
# intersection coordinates
ix1 = torch.max(px1, tx1)
iy1 = torch.max(py1, ty1)
ix2 = torch.min(px2, tx2)
iy2 = torch.min(py2, ty2)
# extra
xmin = torch.min(ix1, ix2)
ymin = torch.min(iy1, iy2)
xmax = torch.max(ix1, ix2)
ymax = torch.max(iy1, iy2)
# Intersection
intersection = (ix2 - ex1) * (iy2 - ey1) + (xmin - ex1) * (ymin - ey1) - (
ix1 - ex1) * (ymax - ey1) - (xmax - ex1) * (
iy1 - ey1)
# Union
union = (px2 - px1) * (py2 - py1) + (tx2 - tx1) * (
ty2 - ty1) - intersection + eps
# IoU
ious = 1 - (intersection / union)
# Smooth-EIoU
smooth_sign = (ious < smooth_point).detach().float()
loss = 0.5 * smooth_sign * (ious**2) / smooth_point + (1 - smooth_sign) * (
ious - 0.5 * smooth_point)
return loss
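# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): compares the functional
# IoU-based losses defined above on a toy prediction/target pair. Assumes
# `bbox_overlaps` from mmdet is importable (see imports at the top); the box
# coordinates are made up.
if __name__ == '__main__':
    pred = torch.tensor([[10., 10., 50., 50.]])
    target = torch.tensor([[15., 15., 55., 55.]])
    for fn in (iou_loss, giou_loss, diou_loss, ciou_loss, eiou_loss):
        print(fn.__name__, fn(pred, target).item())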
@MODELS.register_module()
class IoULoss(nn.Module):
"""IoULoss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
Args:
linear (bool): If True, use linear scale of loss else determined
by mode. Default: False.
eps (float): Epsilon to avoid log(0).
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss.
mode (str): Loss scaling mode, including "linear", "square", and "log".
Default: 'log'
"""
def __init__(self,
linear: bool = False,
eps: float = 1e-6,
reduction: str = 'mean',
loss_weight: float = 1.0,
mode: str = 'log') -> None:
super().__init__()
assert mode in ['linear', 'square', 'log']
if linear:
mode = 'linear'
warnings.warn('DeprecationWarning: Setting "linear=True" in '
'IOULoss is deprecated, please use "mode=`linear`" '
'instead.')
self.mode = mode
self.linear = linear
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None,
**kwargs) -> Tensor:
"""Forward function.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): The learning target of the prediction,
shape (n, 4).
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None. Options are "none", "mean" and "sum".
Return:
Tensor: Loss tensor.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if (weight is not None) and (not torch.any(weight > 0)) and (
reduction != 'none'):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
# iou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * iou_loss(
pred,
target,
weight,
mode=self.mode,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@MODELS.register_module()
class BoundedIoULoss(nn.Module):
"""BIoULoss.
This is an implementation of paper
`Improving Object Localization with Fitness NMS and Bounded IoU Loss.
<https://arxiv.org/abs/1711.00164>`_.
Args:
beta (float, optional): Beta parameter in smoothl1.
eps (float, optional): Epsilon to avoid NaN values.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss.
"""
def __init__(self,
beta: float = 0.2,
eps: float = 1e-3,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.beta = beta
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None,
**kwargs) -> Tensor:
"""Forward function.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): The learning target of the prediction,
shape (n, 4).
weight (Optional[Tensor], optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (Optional[int], optional): Average factor that is used
to average the loss. Defaults to None.
reduction_override (Optional[str], optional): The reduction method
used to override the original reduction method of the loss.
Defaults to None. Options are "none", "mean" and "sum".
Returns:
Tensor: Loss tensor.
"""
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * bounded_iou_loss(
pred,
target,
weight,
beta=self.beta,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@MODELS.register_module()
class GIoULoss(nn.Module):
r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
Box Regression <https://arxiv.org/abs/1902.09630>`_.
Args:
eps (float): Epsilon to avoid log(0).
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss.
"""
def __init__(self,
eps: float = 1e-6,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None,
**kwargs) -> Tensor:
"""Forward function.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): The learning target of the prediction,
shape (n, 4).
weight (Optional[Tensor], optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (Optional[int], optional): Average factor that is used
to average the loss. Defaults to None.
reduction_override (Optional[str], optional): The reduction method
used to override the original reduction method of the loss.
Defaults to None. Options are "none", "mean" and "sum".
Returns:
Tensor: Loss tensor.
"""
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
# giou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * giou_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@MODELS.register_module()
class DIoULoss(nn.Module):
r"""Implementation of `Distance-IoU Loss: Faster and Better
    Learning for Bounding Box Regression <https://arxiv.org/abs/1911.08287>`_.
Code is modified from https://github.com/Zzh-tju/DIoU.
Args:
eps (float): Epsilon to avoid log(0).
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss.
"""
def __init__(self,
eps: float = 1e-6,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None,
**kwargs) -> Tensor:
"""Forward function.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): The learning target of the prediction,
shape (n, 4).
weight (Optional[Tensor], optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (Optional[int], optional): Average factor that is used
to average the loss. Defaults to None.
reduction_override (Optional[str], optional): The reduction method
used to override the original reduction method of the loss.
Defaults to None. Options are "none", "mean" and "sum".
Returns:
Tensor: Loss tensor.
"""
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
            # diou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * diou_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@MODELS.register_module()
class CIoULoss(nn.Module):
r"""`Implementation of paper `Enhancing Geometric Factors into
Model Learning and Inference for Object Detection and Instance
Segmentation <https://arxiv.org/abs/2005.03572>`_.
Code is modified from https://github.com/Zzh-tju/CIoU.
Args:
eps (float): Epsilon to avoid log(0).
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss.
"""
def __init__(self,
eps: float = 1e-6,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None,
**kwargs) -> Tensor:
"""Forward function.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): The learning target of the prediction,
shape (n, 4).
weight (Optional[Tensor], optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (Optional[int], optional): Average factor that is used
to average the loss. Defaults to None.
reduction_override (Optional[str], optional): The reduction method
used to override the original reduction method of the loss.
Defaults to None. Options are "none", "mean" and "sum".
Returns:
Tensor: Loss tensor.
"""
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
            # ciou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * ciou_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@MODELS.register_module()
class EIoULoss(nn.Module):
r"""Implementation of paper `Extended-IoU Loss: A Systematic
IoU-Related Method: Beyond Simplified Regression for Better
Localization <https://ieeexplore.ieee.org/abstract/document/9429909>`_
Code is modified from https://github.com//ShiqiYu/libfacedetection.train.
Args:
eps (float): Epsilon to avoid log(0).
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss.
smooth_point (float): hyperparameter, default is 0.1.
"""
def __init__(self,
eps: float = 1e-6,
reduction: str = 'mean',
loss_weight: float = 1.0,
smooth_point: float = 0.1) -> None:
super().__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
self.smooth_point = smooth_point
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None,
**kwargs) -> Tensor:
"""Forward function.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): The learning target of the prediction,
shape (n, 4).
weight (Optional[Tensor], optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (Optional[int], optional): Average factor that is used
to average the loss. Defaults to None.
reduction_override (Optional[str], optional): The reduction method
used to override the original reduction method of the loss.
Defaults to None. Options are "none", "mean" and "sum".
Returns:
Tensor: Loss tensor.
"""
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * eiou_loss(
pred,
target,
weight,
smooth_point=self.smooth_point,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
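# Illustrative usage sketch (not part of the upstream file); the boxes and
# weights below are made up. It exercises the weight-reduction path noted in
# the TODO comments above: a per-coordinate weight of shape (n, 4) is averaged
# over the last axis so it matches the per-box loss of shape (n,).
def _giou_weight_demo() -> Tensor:
    pred = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    target = torch.tensor([[1., 1., 11., 11.], [5., 5., 15., 15.]])
    # per-coordinate weights; the second box is effectively ignored
    weight = torch.tensor([[1., 1., 1., 1.], [0., 0., 0., 0.]])
    loss_fn = GIoULoss(reduction='mean')
    return loss_fn(pred, target, weight=weight, avg_factor=1.0)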
| 26,172 | 34.131544 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/smooth_l1_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def smooth_l1_loss(pred: Tensor, target: Tensor, beta: float = 1.0) -> Tensor:
"""Smooth L1 loss.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
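# Illustrative sketch (not part of the upstream file); values are made up.
# Numeric check of the piecewise definition above with beta=1.0: an absolute
# error of 0.5 falls in the quadratic branch (0.5 * 0.5**2 / 1.0 = 0.125),
# while an error of 2.0 falls in the linear branch (2.0 - 0.5 = 1.5).
def _smooth_l1_demo() -> Tensor:
    pred = torch.tensor([0.5, 2.0])
    target = torch.zeros(2)
    # reduction='none' keeps the per-element values [0.125, 1.5]
    return smooth_l1_loss(pred, target, reduction='none')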
@weighted_loss
def l1_loss(pred: Tensor, target: Tensor) -> Tensor:
"""L1 loss.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
Returns:
Tensor: Calculated loss
"""
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
loss = torch.abs(pred - target)
return loss
@MODELS.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self,
beta: float = 1.0,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None,
**kwargs) -> Tensor:
"""Forward function.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
Tensor: Calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@MODELS.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None) -> Tensor:
"""Forward function.
Args:
pred (Tensor): The prediction.
target (Tensor): The learning target of the prediction.
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
Tensor: Calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
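# Illustrative usage sketch (not part of the upstream file); numbers are made
# up. It shows how the module wrappers above are typically driven: an
# element-wise weight masks predictions and ``avg_factor`` sets the
# denominator of the mean.
def _l1_module_demo() -> Tensor:
    loss_fn = L1Loss(reduction='mean', loss_weight=1.0)
    pred = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
    target = torch.tensor([[0.5, 0.5, 1.0, 1.0]])
    weight = torch.ones_like(pred)
    # averaged over avg_factor=4 elements -> (0.5 + 0.5) / 4 = 0.25
    return loss_fn(pred, target, weight=weight, avg_factor=4)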
| 4,971 | 30.468354 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/losses/gfocal_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.losses.utils import weighted_loss
from mmdet.registry import MODELS
@weighted_loss
def quality_focal_loss(pred, target, beta=2.0):
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert len(target) == 2, """target for QFL must be a tuple of two elements,
including category label and quality label, respectively"""
# label denotes the category id, score denotes the quality score
label, score = target
# negatives are supervised by 0 quality score
pred_sigmoid = pred.sigmoid()
scale_factor = pred_sigmoid
zerolabel = scale_factor.new_zeros(pred.shape)
loss = F.binary_cross_entropy_with_logits(
pred, zerolabel, reduction='none') * scale_factor.pow(beta)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = pred.size(1)
pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
pos_label = label[pos].long()
# positives are supervised by bbox quality (IoU) score
scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
pred[pos, pos_label], score[pos],
reduction='none') * scale_factor.abs().pow(beta)
loss = loss.sum(dim=1, keepdim=False)
return loss
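# Illustrative sketch (not part of the upstream file); all values are made up.
# QFL takes a (label, score) tuple as its target: ``label`` holds category
# ids (pred.size(1) meaning background) and ``score`` holds the IoU quality
# of each positive sample.
def _qfl_demo() -> torch.Tensor:
    pred = torch.randn(4, 3)            # 4 priors, 3 classes (logits)
    label = torch.tensor([0, 2, 3, 3])  # 3 == background here
    score = torch.tensor([0.9, 0.6, 0.0, 0.0])
    return quality_focal_loss(
        pred, (label, score), reduction='mean', avg_factor=2.0)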
@weighted_loss
def quality_focal_loss_tensor_target(pred, target, beta=2.0, activated=False):
"""`QualityFocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
activated (bool): Whether the input is activated.
If True, it means the input has been activated and can be
treated as probabilities. Else, it should be treated as logits.
Defaults to False.
"""
# pred and target should be of the same size
assert pred.size() == target.size()
if activated:
pred_sigmoid = pred
loss_function = F.binary_cross_entropy
else:
pred_sigmoid = pred.sigmoid()
loss_function = F.binary_cross_entropy_with_logits
scale_factor = pred_sigmoid
target = target.type_as(pred)
zerolabel = scale_factor.new_zeros(pred.shape)
loss = loss_function(
pred, zerolabel, reduction='none') * scale_factor.pow(beta)
pos = (target != 0)
scale_factor = target[pos] - pred_sigmoid[pos]
loss[pos] = loss_function(
pred[pos], target[pos],
reduction='none') * scale_factor.abs().pow(beta)
loss = loss.sum(dim=1, keepdim=False)
return loss
@weighted_loss
def quality_focal_loss_with_prob(pred, target, beta=2.0):
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Different from `quality_focal_loss`, this function accepts probability
as input.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert len(target) == 2, """target for QFL must be a tuple of two elements,
including category label and quality label, respectively"""
# label denotes the category id, score denotes the quality score
label, score = target
# negatives are supervised by 0 quality score
pred_sigmoid = pred
scale_factor = pred_sigmoid
zerolabel = scale_factor.new_zeros(pred.shape)
loss = F.binary_cross_entropy(
pred, zerolabel, reduction='none') * scale_factor.pow(beta)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = pred.size(1)
pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
pos_label = label[pos].long()
# positives are supervised by bbox quality (IoU) score
scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
loss[pos, pos_label] = F.binary_cross_entropy(
pred[pos, pos_label], score[pos],
reduction='none') * scale_factor.abs().pow(beta)
loss = loss.sum(dim=1, keepdim=False)
return loss
@weighted_loss
def distribution_focal_loss(pred, label):
r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted general distribution of bounding boxes
(before softmax) with shape (N, n+1), n is the max value of the
integral set `{0, ..., n}` in paper.
label (torch.Tensor): Target distance label for bounding boxes with
shape (N,).
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
dis_left = label.long()
dis_right = dis_left + 1
weight_left = dis_right.float() - label
weight_right = label - dis_left.float()
loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
+ F.cross_entropy(pred, dis_right, reduction='none') * weight_right
return loss
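# Illustrative sketch (not part of the upstream file); values are made up.
# DFL supervises a continuous target as a weighted pair of its neighbouring
# integer bins: a target of 2.4 is trained as bin 2 with weight 0.6 and
# bin 3 with weight 0.4.
def _dfl_demo() -> torch.Tensor:
    pred = torch.randn(2, 8)          # distributions over bins {0, ..., 7}
    label = torch.tensor([2.4, 5.0])  # continuous distance targets
    return distribution_focal_loss(pred, label, reduction='none')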
@MODELS.register_module()
class QualityFocalLoss(nn.Module):
r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
Learning Qualified and Distributed Bounding Boxes for Dense Object
Detection <https://arxiv.org/abs/2006.04388>`_.
Args:
use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
Defaults to True.
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
activated (bool, optional): Whether the input is activated.
If True, it means the input has been activated and can be
treated as probabilities. Else, it should be treated as logits.
Defaults to False.
"""
def __init__(self,
use_sigmoid=True,
beta=2.0,
reduction='mean',
loss_weight=1.0,
activated=False):
super(QualityFocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
self.use_sigmoid = use_sigmoid
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
self.activated = activated
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): Predicted joint representation of
classification and quality (IoU) estimation with shape (N, C),
C is the number of classes.
            target (Union[tuple[torch.Tensor], torch.Tensor]): If the type is
                a tuple, it should contain the target category label with
                shape (N,) and the target quality label with shape (N,). If
                the type is torch.Tensor, the target should be in one-hot
                form with soft weights.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
if self.activated:
calculate_loss_func = quality_focal_loss_with_prob
else:
calculate_loss_func = quality_focal_loss
if isinstance(target, torch.Tensor):
# the target shape with (N,C) or (N,C,...), which means
# the target is one-hot form with soft weights.
calculate_loss_func = partial(
quality_focal_loss_tensor_target, activated=self.activated)
loss_cls = self.loss_weight * calculate_loss_func(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
@MODELS.register_module()
class DistributionFocalLoss(nn.Module):
r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
Learning Qualified and Distributed Bounding Boxes for Dense Object
Detection <https://arxiv.org/abs/2006.04388>`_.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(DistributionFocalLoss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): Predicted general distribution of bounding
boxes (before softmax) with shape (N, n+1), n is the max value
of the integral set `{0, ..., n}` in paper.
target (torch.Tensor): Target distance label for bounding boxes
with shape (N,).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_cls = self.loss_weight * distribution_focal_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_cls
| 11,838 | 38.996622 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/varifocal_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
from .utils import weight_reduce_loss
def varifocal_loss(pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
alpha: float = 0.75,
gamma: float = 2.0,
iou_weighted: bool = True,
reduction: str = 'mean',
avg_factor: Optional[int] = None) -> Tensor:
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (Tensor): The prediction with shape (N, C), C is the
number of classes.
target (Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
Tensor: Loss tensor.
"""
# pred and target should be of the same size
assert pred.size() == target.size()
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
if iou_weighted:
focal_weight = target * (target > 0.0).float() + \
alpha * (pred_sigmoid - target).abs().pow(gamma) * \
(target <= 0.0).float()
else:
focal_weight = (target > 0.0).float() + \
alpha * (pred_sigmoid - target).abs().pow(gamma) * \
(target <= 0.0).float()
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
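# Illustrative sketch (not part of the upstream file); values are made up and
# ``torch`` is imported locally because this module only imports submodules.
# Positives are weighted by their IoU-aware target score, negatives by
# alpha * |p - target| ** gamma.
def _varifocal_demo() -> Tensor:
    import torch
    pred = torch.randn(4, 3)   # classification logits
    target = torch.zeros(4, 3)
    target[0, 1] = 0.8         # one positive with IoU quality 0.8
    return varifocal_loss(pred, target, reduction='mean')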
@MODELS.register_module()
class VarifocalLoss(nn.Module):
def __init__(self,
use_sigmoid: bool = True,
alpha: float = 0.75,
gamma: float = 2.0,
iou_weighted: bool = True,
reduction: str = 'mean',
loss_weight: float = 1.0) -> None:
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
use_sigmoid (bool, optional): Whether the prediction is
used for sigmoid or softmax. Defaults to True.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal
Loss. Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive examples with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
"""
super().__init__()
assert use_sigmoid is True, \
'Only sigmoid varifocal loss supported now.'
assert alpha >= 0.0
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.iou_weighted = iou_weighted
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None) -> Tensor:
"""Forward function.
Args:
pred (Tensor): The prediction with shape (N, C), C is the
number of classes.
target (Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is
the number of classes.
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
loss_cls = self.loss_weight * varifocal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
iou_weighted=self.iou_weighted,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
| 5,749 | 39.492958 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
from typing import Callable, Optional
import torch
import torch.nn.functional as F
from torch import Tensor
def reduce_loss(loss: Tensor, reduction: str) -> Tensor:
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss: Tensor,
weight: Optional[Tensor] = None,
reduction: str = 'mean',
avg_factor: Optional[float] = None) -> Tensor:
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Optional[Tensor], optional): Element-wise weights.
Defaults to None.
reduction (str, optional): Same as built-in losses of PyTorch.
Defaults to 'mean'.
avg_factor (Optional[float], optional): Average factor when
computing the mean of losses. Defaults to None.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
# Avoid causing ZeroDivisionError when avg_factor is 0.0,
# i.e., all labels of an image belong to ignore index.
eps = torch.finfo(torch.float32).eps
loss = loss.sum() / (avg_factor + eps)
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
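# Illustrative sketch (not part of the upstream file); numbers are made up.
# With reduction='mean' and an explicit ``avg_factor`` the weighted sum is
# divided by that factor instead of by the element count, which is how
# detection losses are normalised by e.g. the number of positive samples.
def _weight_reduce_demo() -> Tensor:
    loss = torch.tensor([1.0, 2.0, 3.0, 0.0])
    weight = torch.tensor([1.0, 1.0, 0.0, 1.0])
    # (1.0 + 2.0) / 2 = 1.5
    return weight_reduce_loss(loss, weight, reduction='mean', avg_factor=2)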
def weighted_loss(loss_func: Callable) -> Callable:
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
reduction: str = 'mean',
avg_factor: Optional[int] = None,
**kwargs) -> Tensor:
"""
Args:
pred (Tensor): The prediction.
target (Tensor): Target bboxes.
weight (Optional[Tensor], optional): The weight of loss for each
prediction. Defaults to None.
reduction (str, optional): Options are "none", "mean" and "sum".
Defaults to 'mean'.
avg_factor (Optional[int], optional): Average factor that is used
to average the loss. Defaults to None.
Returns:
Tensor: Loss tensor.
"""
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
| 4,256 | 32.785714 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/seesaw_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
from .accuracy import accuracy
from .cross_entropy_loss import cross_entropy
from .utils import weight_reduce_loss
def seesaw_ce_loss(cls_score: Tensor,
labels: Tensor,
label_weights: Tensor,
cum_samples: Tensor,
num_classes: int,
p: float,
q: float,
eps: float,
reduction: str = 'mean',
avg_factor: Optional[int] = None) -> Tensor:
"""Calculate the Seesaw CrossEntropy loss.
Args:
cls_score (Tensor): The prediction with shape (N, C),
C is the number of classes.
labels (Tensor): The learning label of the prediction.
label_weights (Tensor): Sample-wise loss weight.
cum_samples (Tensor): Cumulative samples for each category.
num_classes (int): The number of classes.
p (float): The ``p`` in the mitigation factor.
        q (float): The ``q`` in the compensation factor.
        eps (float): The minimal value of divisor to smooth
            the computation of the compensation factor.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
Tensor: The calculated loss
"""
assert cls_score.size(-1) == num_classes
assert len(cum_samples) == num_classes
onehot_labels = F.one_hot(labels, num_classes)
seesaw_weights = cls_score.new_ones(onehot_labels.size())
# mitigation factor
if p > 0:
sample_ratio_matrix = cum_samples[None, :].clamp(
min=1) / cum_samples[:, None].clamp(min=1)
index = (sample_ratio_matrix < 1.0).float()
sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index)
mitigation_factor = sample_weights[labels.long(), :]
seesaw_weights = seesaw_weights * mitigation_factor
# compensation factor
if q > 0:
scores = F.softmax(cls_score.detach(), dim=1)
self_scores = scores[
torch.arange(0, len(scores)).to(scores.device).long(),
labels.long()]
score_matrix = scores / self_scores[:, None].clamp(min=eps)
index = (score_matrix > 1.0).float()
compensation_factor = score_matrix.pow(q) * index + (1 - index)
seesaw_weights = seesaw_weights * compensation_factor
cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels))
loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none')
if label_weights is not None:
label_weights = label_weights.float()
loss = weight_reduce_loss(
loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor)
return loss
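# Illustrative sketch (not part of the upstream file); all numbers are made
# up. ``cum_samples`` holds how many samples of each class have been seen so
# far; the ratio between counts drives the mitigation factor, so rare classes
# receive weaker negative gradients from frequent ones.
def _seesaw_ce_demo() -> Tensor:
    num_classes = 3
    cls_score = torch.randn(4, num_classes)
    labels = torch.tensor([0, 1, 2, 0])
    label_weights = torch.ones(4)
    cum_samples = torch.tensor([100.0, 10.0, 1.0])  # long-tailed counts
    return seesaw_ce_loss(cls_score, labels, label_weights, cum_samples,
                          num_classes, p=0.8, q=2.0, eps=1e-2,
                          reduction='mean', avg_factor=None)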
@MODELS.register_module()
class SeesawLoss(nn.Module):
"""
Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021)
arXiv: https://arxiv.org/abs/2008.10032
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            or softmax. Only False is supported.
p (float, optional): The ``p`` in the mitigation factor.
Defaults to 0.8.
        q (float, optional): The ``q`` in the compensation factor.
            Defaults to 2.0.
        num_classes (int, optional): The number of classes.
            Defaults to 1203 for the LVIS v1 dataset.
        eps (float, optional): The minimal value of divisor to smooth
            the computation of the compensation factor.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
return_dict (bool, optional): Whether return the losses as a dict.
Default to True.
"""
def __init__(self,
use_sigmoid: bool = False,
p: float = 0.8,
q: float = 2.0,
num_classes: int = 1203,
eps: float = 1e-2,
reduction: str = 'mean',
loss_weight: float = 1.0,
return_dict: bool = True) -> None:
super().__init__()
assert not use_sigmoid
self.use_sigmoid = False
self.p = p
self.q = q
self.num_classes = num_classes
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
self.return_dict = return_dict
        self.cls_criterion = seesaw_ce_loss
# cumulative samples for each category
self.register_buffer(
'cum_samples',
torch.zeros(self.num_classes + 1, dtype=torch.float))
# custom output channels of the classifier
self.custom_cls_channels = True
# custom activation of cls_score
self.custom_activation = True
        # custom accuracy of the classifier
self.custom_accuracy = True
def _split_cls_score(self, cls_score: Tensor) -> Tuple[Tensor, Tensor]:
"""split cls_score.
Args:
cls_score (Tensor): The prediction with shape (N, C + 2).
Returns:
Tuple[Tensor, Tensor]: The score for classes and objectness,
respectively
"""
# split cls_score to cls_score_classes and cls_score_objectness
assert cls_score.size(-1) == self.num_classes + 2
cls_score_classes = cls_score[..., :-2]
cls_score_objectness = cls_score[..., -2:]
return cls_score_classes, cls_score_objectness
def get_cls_channels(self, num_classes: int) -> int:
"""Get custom classification channels.
Args:
num_classes (int): The number of classes.
Returns:
int: The custom classification channels.
"""
assert num_classes == self.num_classes
return num_classes + 2
def get_activation(self, cls_score: Tensor) -> Tensor:
"""Get custom activation of cls_score.
Args:
cls_score (Tensor): The prediction with shape (N, C + 2).
Returns:
Tensor: The custom activation of cls_score with shape
(N, C + 1).
"""
cls_score_classes, cls_score_objectness = self._split_cls_score(
cls_score)
score_classes = F.softmax(cls_score_classes, dim=-1)
score_objectness = F.softmax(cls_score_objectness, dim=-1)
score_pos = score_objectness[..., [0]]
score_neg = score_objectness[..., [1]]
score_classes = score_classes * score_pos
scores = torch.cat([score_classes, score_neg], dim=-1)
return scores
def get_accuracy(self, cls_score: Tensor,
labels: Tensor) -> Dict[str, Tensor]:
"""Get custom accuracy w.r.t. cls_score and labels.
Args:
cls_score (Tensor): The prediction with shape (N, C + 2).
labels (Tensor): The learning label of the prediction.
Returns:
Dict [str, Tensor]: The accuracy for objectness and classes,
respectively.
"""
pos_inds = labels < self.num_classes
obj_labels = (labels == self.num_classes).long()
cls_score_classes, cls_score_objectness = self._split_cls_score(
cls_score)
acc_objectness = accuracy(cls_score_objectness, obj_labels)
acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds])
acc = dict()
acc['acc_objectness'] = acc_objectness
acc['acc_classes'] = acc_classes
return acc
def forward(
self,
cls_score: Tensor,
labels: Tensor,
label_weights: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None
) -> Union[Tensor, Dict[str, Tensor]]:
"""Forward function.
Args:
cls_score (Tensor): The prediction with shape (N, C + 2).
labels (Tensor): The learning label of the prediction.
label_weights (Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".
Returns:
Tensor | Dict [str, Tensor]:
if return_dict == False: The calculated loss |
if return_dict == True: The dict of calculated losses
for objectness and classes, respectively.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
assert cls_score.size(-1) == self.num_classes + 2
pos_inds = labels < self.num_classes
# 0 for pos, 1 for neg
obj_labels = (labels == self.num_classes).long()
# accumulate the samples for each category
unique_labels = labels.unique()
for u_l in unique_labels:
inds_ = labels == u_l.item()
self.cum_samples[u_l] += inds_.sum()
if label_weights is not None:
label_weights = label_weights.float()
else:
label_weights = labels.new_ones(labels.size(), dtype=torch.float)
cls_score_classes, cls_score_objectness = self._split_cls_score(
cls_score)
# calculate loss_cls_classes (only need pos samples)
if pos_inds.sum() > 0:
loss_cls_classes = self.loss_weight * self.cls_criterion(
cls_score_classes[pos_inds], labels[pos_inds],
label_weights[pos_inds], self.cum_samples[:self.num_classes],
self.num_classes, self.p, self.q, self.eps, reduction,
avg_factor)
else:
loss_cls_classes = cls_score_classes[pos_inds].sum()
# calculate loss_cls_objectness
loss_cls_objectness = self.loss_weight * cross_entropy(
cls_score_objectness, obj_labels, label_weights, reduction,
avg_factor)
if self.return_dict:
loss_cls = dict()
loss_cls['loss_cls_objectness'] = loss_cls_objectness
loss_cls['loss_cls_classes'] = loss_cls_classes
else:
loss_cls = loss_cls_classes + loss_cls_objectness
return loss_cls
| 10,723 | 37.437276 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/ae_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
def ae_loss_per_image(tl_preds, br_preds, match):
"""Associative Embedding Loss in one image.
    Associative Embedding Loss consists of two parts: pull loss and push loss.
    Pull loss makes embedding vectors from the same object closer to each
    other. Push loss distinguishes embedding vectors from different objects
    and makes the gap between them large enough.
    During computation, there are usually 3 cases:
        - no object in image: both pull loss and push loss will be 0.
        - one object in image: push loss will be 0 and pull loss is computed
            by the two corners of the only object.
        - more than one object in image: pull loss is computed by corner pairs
            from each object, push loss is computed by each object with all
            other objects. We use a confusion matrix with 0 on the diagonal to
            compute the push loss.
Args:
        tl_preds (tensor): Embedding feature map of the top-left corner.
        br_preds (tensor): Embedding feature map of the bottom-right corner.
        match (list): Downsampled coordinate pairs of each ground truth box.
"""
tl_list, br_list, me_list = [], [], []
if len(match) == 0: # no object in image
pull_loss = tl_preds.sum() * 0.
push_loss = tl_preds.sum() * 0.
else:
for m in match:
[tl_y, tl_x], [br_y, br_x] = m
tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
br_e = br_preds[:, br_y, br_x].view(-1, 1)
tl_list.append(tl_e)
br_list.append(br_e)
me_list.append((tl_e + br_e) / 2.0)
tl_list = torch.cat(tl_list)
br_list = torch.cat(br_list)
me_list = torch.cat(me_list)
assert tl_list.size() == br_list.size()
# N is object number in image, M is dimension of embedding vector
N, M = tl_list.size()
pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
pull_loss = pull_loss.sum() / N
margin = 1 # exp setting of CornerNet, details in section 3.3 of paper
# confusion matrix of push loss
conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
conf_weight = 1 - torch.eye(N).type_as(me_list)
conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())
if N > 1: # more than one object in current image
push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
else:
push_loss = tl_preds.sum() * 0.
return pull_loss, push_loss
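# Illustrative sketch (not part of the upstream file); coordinates and
# feature maps are made up. Two objects on a 4x4 map with a 1-d embedding:
# pull loss ties each object's two corners together, push loss drives the
# mean embeddings of different objects at least ``margin`` apart.
def _ae_loss_demo():
    tl_preds = torch.randn(1, 4, 4)  # (embedding_dim, H, W)
    br_preds = torch.randn(1, 4, 4)
    match = [[[0, 0], [2, 2]],       # object 1: top-left / bottom-right
             [[1, 3], [3, 3]]]       # object 2
    return ae_loss_per_image(tl_preds, br_preds, match)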
@MODELS.register_module()
class AssociativeEmbeddingLoss(nn.Module):
"""Associative Embedding Loss.
More details can be found in
`Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
`CornerNet <https://arxiv.org/abs/1808.01244>`_ .
Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_ # noqa: E501
Args:
pull_weight (float): Loss weight for corners from same object.
push_weight (float): Loss weight for corners from different object.
"""
def __init__(self, pull_weight=0.25, push_weight=0.25):
super(AssociativeEmbeddingLoss, self).__init__()
self.pull_weight = pull_weight
self.push_weight = push_weight
def forward(self, pred, target, match):
"""Forward function."""
batch = pred.size(0)
pull_all, push_all = 0.0, 0.0
for i in range(batch):
pull, push = ae_loss_per_image(pred[i], target[i], match[i])
pull_all += self.pull_weight * pull
push_all += self.push_weight * push
return pull_all, push_all
| 3,810 | 36.362745 | 143 |
py
|
ERD
|
ERD-main/mmdet/models/losses/accuracy.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class)
target (torch.Tensor): The target of each prediction, shape (N, )
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
this threshold are considered incorrect. Default to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for i in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == 2 and target.ndim == 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t() # transpose to shape (maxk, N)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
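# Illustrative sketch (not part of the upstream file); scores are made up and
# ``torch`` is imported locally because this module only imports torch.nn.
# Sample 0 is correct at top-1, samples 1 and 2 only at top-2, so the call
# returns roughly (33.3, 100.0).
def _accuracy_demo():
    import torch
    pred = torch.tensor([[0.9, 0.1, 0.0],
                         [0.4, 0.5, 0.1],
                         [0.1, 0.2, 0.7]])
    target = torch.tensor([0, 0, 1])
    return accuracy(pred, target, topk=(1, 2))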
class Accuracy(nn.Module):
def __init__(self, topk=(1, ), thresh=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
thresh (float, optional): If not None, predictions with scores
under this threshold are considered incorrect. Default to None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh)
| 2,953 | 36.871795 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/focal_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from mmdet.registry import MODELS
from .utils import weight_reduce_loss
# This method is only for debugging
def py_sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
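# Illustrative sketch (not part of the upstream file); inputs are made up.
# The modulating factor pt**gamma shrinks the loss of well-classified
# examples: for a confident correct positive (p ~ 0.95) it is roughly
# (1 - 0.95)**2 = 0.0025, on top of the alpha balancing term.
def _py_focal_demo():
    pred = torch.tensor([[3.0, -3.0],    # confident on class 0
                         [0.0, 0.0]])    # uncertain
    target = torch.tensor([[1.0, 0.0],
                           [0.0, 1.0]])  # one-hot targets
    return py_sigmoid_focal_loss(pred, target, reduction='none')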
def py_focal_loss_with_prob(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Different from `py_sigmoid_focal_loss`, this function accepts probability
as input.
Args:
pred (torch.Tensor): The prediction probability with shape (N, C),
C is the number of classes.
target (torch.Tensor): The learning label of the prediction.
            The target shape supports (N, C) or (N,); (N, C) means
            one-hot form.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
if pred.dim() != target.dim():
num_classes = pred.size(1)
target = F.one_hot(target, num_classes=num_classes + 1)
target = target[:, :num_classes]
target = target.type_as(pred)
pt = (1 - pred) * target + pred * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy(
pred, target, reduction='none') * focal_weight
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
def sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
r"""A wrapper of cuda version `Focal Loss
<https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
# Function.apply does not accept keyword arguments, so the decorator
# "weighted_loss" is not applicable
loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma,
alpha, None, 'none')
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@MODELS.register_module()
class FocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=1.0,
activated=False):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_
Args:
            use_sigmoid (bool, optional): Whether the prediction is
                used for sigmoid or softmax. Defaults to True.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
activated (bool, optional): Whether the input is activated.
If True, it means the input has been activated and can be
treated as probabilities. Else, it should be treated as logits.
Defaults to False.
"""
super(FocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
self.use_sigmoid = use_sigmoid
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
self.activated = activated
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning label of the prediction.
                The target shape supports (N, C) or (N,); (N, C) means
                one-hot form.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
if self.activated:
calculate_loss_func = py_focal_loss_with_prob
else:
if pred.dim() == target.dim():
# this means that target is already in One-Hot form.
calculate_loss_func = py_sigmoid_focal_loss
elif torch.cuda.is_available() and pred.is_cuda:
calculate_loss_func = sigmoid_focal_loss
else:
num_classes = pred.size(1)
target = F.one_hot(target, num_classes=num_classes + 1)
target = target[:, :num_classes]
calculate_loss_func = py_sigmoid_focal_loss
loss_cls = self.loss_weight * calculate_loss_func(
pred,
target,
weight,
gamma=self.gamma,
alpha=self.alpha,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
| 10,834 | 41.996032 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/cross_entropy_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100,
avg_non_ignore=False):
"""Calculate the CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored.
If None, it will be set to default value. Default: -100.
        avg_non_ignore (bool): The flag decides whether the loss is
only averaged over non-ignored targets. Default: False.
Returns:
torch.Tensor: The calculated loss
"""
# The default value of ignore_index is the same as F.cross_entropy
ignore_index = -100 if ignore_index is None else ignore_index
# element-wise losses
loss = F.cross_entropy(
pred,
label,
weight=class_weight,
reduction='none',
ignore_index=ignore_index)
# average loss over non-ignored elements
# pytorch's official cross_entropy average loss over non-ignored elements
# refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa
if (avg_factor is None) and avg_non_ignore and reduction == 'mean':
avg_factor = label.numel() - (label == ignore_index).sum().item()
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
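# Illustrative sketch (not part of the upstream file); logits are made up.
# With ``avg_non_ignore=True`` and one of three labels equal to the ignore
# index, the mean is taken over the 2 non-ignored elements instead of 3,
# matching PyTorch's own cross_entropy averaging.
def _cross_entropy_demo():
    pred = torch.randn(3, 5)
    label = torch.tensor([1, -100, 4])  # -100 is the default ignore index
    return cross_entropy(pred, label, reduction='mean', avg_non_ignore=True)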
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(
valid_mask & (labels < label_channels), as_tuple=False)
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
label_channels).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights, valid_mask
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100,
avg_non_ignore=False):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1) or (N, ).
When the shape of pred is (N, 1), label will be expanded to
one-hot format, and when the shape of pred is (N, ), label
will not be expanded to one-hot format.
label (torch.Tensor): The learning label of the prediction,
with shape (N, ).
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored.
If None, it will be set to default value. Default: -100.
        avg_non_ignore (bool): The flag decides whether the loss is
only averaged over non-ignored targets. Default: False.
Returns:
torch.Tensor: The calculated loss.
"""
# The default value of ignore_index is the same as F.cross_entropy
ignore_index = -100 if ignore_index is None else ignore_index
if pred.dim() != label.dim():
label, weight, valid_mask = _expand_onehot_labels(
label, weight, pred.size(-1), ignore_index)
else:
# should mask out the ignored elements
valid_mask = ((label >= 0) & (label != ignore_index)).float()
if weight is not None:
# The inplace writing method will have a mismatched broadcast
# shape error if the weight and valid_mask dimensions
# are inconsistent such as (B,N,1) and (B,N,C).
weight = weight * valid_mask
else:
weight = valid_mask
# average loss over non-ignored elements
if (avg_factor is None) and avg_non_ignore and reduction == 'mean':
avg_factor = valid_mask.sum().item()
# weighted element-wise losses
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), pos_weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=None,
**kwargs):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C, *), C is the
number of classes. The trailing * indicates arbitrary shape.
target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the
            object corresponding to the mask. It is used to select the mask
            of the class that the object belongs to when the mask prediction
            is not class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (None): Placeholder, to be consistent with other loss.
Default: None.
Returns:
torch.Tensor: The calculated loss
Example:
>>> N, C = 3, 11
>>> H, W = 2, 2
>>> pred = torch.randn(N, C, H, W) * 1000
>>> target = torch.rand(N, H, W)
>>> label = torch.randint(0, C, size=(N,))
>>> reduction = 'mean'
>>> avg_factor = None
>>> class_weights = None
>>> loss = mask_cross_entropy(pred, target, label, reduction,
>>> avg_factor, class_weights)
>>> assert loss.shape == (1,)
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@MODELS.register_module()
class CrossEntropyLoss(nn.Module):
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
ignore_index=None,
loss_weight=1.0,
avg_non_ignore=False):
"""CrossEntropyLoss.
Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                instead of softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
            reduction (str, optional): The method used to reduce the loss.
                Options are "none", "mean" and "sum". Defaults to 'mean'.
class_weight (list[float], optional): Weight of each class.
Defaults to None.
ignore_index (int | None): The label index to be ignored.
Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
            avg_non_ignore (bool): Whether the loss is only averaged over
                non-ignored targets. Default: False.
"""
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
self.ignore_index = ignore_index
self.avg_non_ignore = avg_non_ignore
if ((ignore_index is not None) and not self.avg_non_ignore
and self.reduction == 'mean'):
warnings.warn(
                'Default ``avg_non_ignore`` is False. If you would like to '
                'ignore certain labels and average the loss only over '
                'non-ignored labels, which is consistent with the official '
                'PyTorch cross_entropy, set ``avg_non_ignore=True``.')
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def extra_repr(self):
"""Extra repr."""
s = f'avg_non_ignore={self.avg_non_ignore}'
return s
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
ignore_index=None,
**kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The prediction.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss. Options are "none", "mean" and "sum".
ignore_index (int | None): The label index to be ignored.
If not None, it will override the default value. Default: None.
Returns:
torch.Tensor: The calculated loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if ignore_index is None:
ignore_index = self.ignore_index
if self.class_weight is not None:
class_weight = cls_score.new_tensor(
self.class_weight, device=cls_score.device)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
ignore_index=ignore_index,
avg_non_ignore=self.avg_non_ignore,
**kwargs)
return loss_cls
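# A minimal usage sketch for ``CrossEntropyLoss``, assuming softmax
# classification over 4 classes with random logits:
# >>> import torch
# >>> loss_cls = CrossEntropyLoss(use_sigmoid=False, loss_weight=1.0)
# >>> cls_score = torch.randn(8, 4)            # (N, num_classes) logits
# >>> label = torch.randint(0, 4, (8, ))       # (N, ) class indices
# >>> loss = loss_cls(cls_score, label)        # scalar, 'mean' reduction
# With ``use_sigmoid=True`` the criterion switches to
# ``binary_cross_entropy`` and expects binary (one-vs-rest) targets.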
| 12,148 | 39.228477 | 132 |
py
|
ERD
|
ERD-main/mmdet/models/losses/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .mse_loss import MSELoss, mse_loss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss',
'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss',
'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss',
'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss',
'SeesawLoss', 'DiceLoss'
]
| 1,747 | 50.411765 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/losses/gaussian_focal_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
import torch.nn as nn
from torch import Tensor
from mmdet.registry import MODELS
from .utils import weight_reduce_loss, weighted_loss
@weighted_loss
def gaussian_focal_loss(pred: Tensor,
gaussian_target: Tensor,
alpha: float = 2.0,
gamma: float = 4.0,
pos_weight: float = 1.0,
neg_weight: float = 1.0) -> Tensor:
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution.
Args:
pred (torch.Tensor): The prediction.
gaussian_target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 2.0.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 4.0.
pos_weight(float): Positive sample loss weight. Defaults to 1.0.
neg_weight(float): Negative sample loss weight. Defaults to 1.0.
"""
eps = 1e-12
pos_weights = gaussian_target.eq(1)
neg_weights = (1 - gaussian_target).pow(gamma)
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
return pos_weight * pos_loss + neg_weight * neg_loss
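# A minimal usage sketch for ``gaussian_focal_loss``. The ``@weighted_loss``
# decorator adds ``weight``, ``reduction`` and ``avg_factor`` arguments, and
# the prediction is assumed to be a heatmap already mapped into (0, 1), e.g.
# by a sigmoid:
# >>> import torch
# >>> pred = torch.rand(2, 10, 32, 32)      # stands in for a sigmoid output
# >>> target = torch.rand(2, 10, 32, 32)    # gaussian-smoothed heatmap target
# >>> loss = gaussian_focal_loss(pred, target)  # scalar, 'mean' reduction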
def gaussian_focal_loss_with_pos_inds(
pred: Tensor,
gaussian_target: Tensor,
pos_inds: Tensor,
pos_labels: Tensor,
alpha: float = 2.0,
gamma: float = 4.0,
pos_weight: float = 1.0,
neg_weight: float = 1.0,
reduction: str = 'mean',
avg_factor: Optional[Union[int, float]] = None) -> Tensor:
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution.
Note: The index with a value of 1 in ``gaussian_target`` in the
``gaussian_focal_loss`` function is a positive sample, but in
``gaussian_focal_loss_with_pos_inds`` the positive sample is passed
in through the ``pos_inds`` parameter.
Args:
pred (torch.Tensor): The prediction. The shape is (N, num_classes).
gaussian_target (torch.Tensor): The learning target of the prediction
in gaussian distribution. The shape is (N, num_classes).
pos_inds (torch.Tensor): The positive sample index.
The shape is (M, ).
pos_labels (torch.Tensor): The label corresponding to the positive
sample index. The shape is (M, ).
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 2.0.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 4.0.
pos_weight(float): Positive sample loss weight. Defaults to 1.0.
neg_weight(float): Negative sample loss weight. Defaults to 1.0.
reduction (str): Options are "none", "mean" and "sum".
            Defaults to 'mean'.
avg_factor (int, float, optional): Average factor that is used to
average the loss. Defaults to None.
"""
eps = 1e-12
neg_weights = (1 - gaussian_target).pow(gamma)
pos_pred_pix = pred[pos_inds]
pos_pred = pos_pred_pix.gather(1, pos_labels.unsqueeze(1))
pos_loss = -(pos_pred + eps).log() * (1 - pos_pred).pow(alpha)
pos_loss = weight_reduce_loss(pos_loss, None, reduction, avg_factor)
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
neg_loss = weight_reduce_loss(neg_loss, None, reduction, avg_factor)
return pos_weight * pos_loss + neg_weight * neg_loss
@MODELS.register_module()
class GaussianFocalLoss(nn.Module):
"""GaussianFocalLoss is a variant of focal loss.
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_
Code is modified from `kp_utils.py
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501
Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
not 0/1 binary target.
Args:
alpha (float): Power of prediction.
gamma (float): Power of target for negative samples.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
pos_weight(float): Positive sample loss weight. Defaults to 1.0.
neg_weight(float): Negative sample loss weight. Defaults to 1.0.
"""
def __init__(self,
alpha: float = 2.0,
gamma: float = 4.0,
reduction: str = 'mean',
loss_weight: float = 1.0,
pos_weight: float = 1.0,
neg_weight: float = 1.0) -> None:
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.loss_weight = loss_weight
self.pos_weight = pos_weight
self.neg_weight = neg_weight
def forward(self,
pred: Tensor,
target: Tensor,
pos_inds: Optional[Tensor] = None,
pos_labels: Optional[Tensor] = None,
weight: Optional[Tensor] = None,
avg_factor: Optional[Union[int, float]] = None,
reduction_override: Optional[str] = None) -> Tensor:
"""Forward function.
If you want to manually determine which positions are
positive samples, you can set the pos_index and pos_label
parameter. Currently, only the CenterNet update version uses
the parameter.
Args:
pred (torch.Tensor): The prediction. The shape is (N, num_classes).
target (torch.Tensor): The learning target of the prediction
in gaussian distribution. The shape is (N, num_classes).
pos_inds (torch.Tensor): The positive sample index.
Defaults to None.
pos_labels (torch.Tensor): The label corresponding to the positive
sample index. Defaults to None.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, float, optional): Average factor that is used to
average the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if pos_inds is not None:
assert pos_labels is not None
# Only used by centernet update version
loss_reg = self.loss_weight * gaussian_focal_loss_with_pos_inds(
pred,
target,
pos_inds,
pos_labels,
alpha=self.alpha,
gamma=self.gamma,
pos_weight=self.pos_weight,
neg_weight=self.neg_weight,
reduction=reduction,
avg_factor=avg_factor)
else:
loss_reg = self.loss_weight * gaussian_focal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
pos_weight=self.pos_weight,
neg_weight=self.neg_weight,
reduction=reduction,
avg_factor=avg_factor)
return loss_reg
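# A minimal usage sketch for ``GaussianFocalLoss``, assuming a CenterNet-style
# heatmap head with 80 classes and random tensors in (0, 1):
# >>> import torch
# >>> loss_heatmap = GaussianFocalLoss(alpha=2.0, gamma=4.0, loss_weight=1.0)
# >>> pred = torch.rand(2, 80, 32, 32)
# >>> target = torch.rand(2, 80, 32, 32)
# >>> loss = loss_heatmap(pred, target)        # scalar loss
# The ``pos_inds``/``pos_labels`` branch is only needed for the CenterNet
# update version, where positives are given explicitly instead of being the
# heatmap locations equal to 1.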
| 7,689 | 40.122995 | 108 |
py
|
ERD
|
ERD-main/mmdet/models/losses/kd_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
from .utils import weighted_loss
@weighted_loss
def knowledge_distillation_kl_div_loss(pred: Tensor,
soft_label: Tensor,
T: int,
detach_target: bool = True) -> Tensor:
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic differentiation.
            Defaults to True.
Returns:
Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@MODELS.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self,
reduction: str = 'mean',
loss_weight: float = 1.0,
T: int = 10) -> None:
super().__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred: Tensor,
soft_label: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None) -> Tensor:
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
Tensor: Loss tensor.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
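# A minimal usage sketch for ``KnowledgeDistillationKLDivLoss``, assuming
# student and teacher logits over 21 classes; the KL term is multiplied by
# T * T so that gradient magnitudes stay comparable across temperatures:
# >>> import torch
# >>> kd_loss = KnowledgeDistillationKLDivLoss(T=10, loss_weight=1.0)
# >>> student_logits = torch.randn(8, 21)
# >>> teacher_logits = torch.randn(8, 21)
# >>> loss = kd_loss(student_logits, teacher_logits)   # scalar loss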
| 3,123 | 31.541667 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/pvt.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer
from mmcv.cnn.bricks.drop import build_dropout
from mmcv.cnn.bricks.transformer import MultiheadAttention
from mmengine.logging import MMLogger
from mmengine.model import (BaseModule, ModuleList, Sequential, constant_init,
normal_init, trunc_normal_init)
from mmengine.model.weight_init import trunc_normal_
from mmengine.runner.checkpoint import CheckpointLoader, load_state_dict
from torch.nn.modules.utils import _pair as to_2tuple
from mmdet.registry import MODELS
from ..layers import PatchEmbed, nchw_to_nlc, nlc_to_nchw
class MixFFN(BaseModule):
"""An implementation of MixFFN of PVT.
The differences between MixFFN & FFN:
1. Use 1X1 Conv to replace Linear layer.
2. Introduce 3X3 Depth-wise Conv to encode positional information.
Args:
embed_dims (int): The feature dimension. Same as
`MultiheadAttention`.
feedforward_channels (int): The hidden dimension of FFNs.
act_cfg (dict, optional): The activation config for FFNs.
Default: dict(type='GELU').
ffn_drop (float, optional): Probability of an element to be
zeroed in FFN. Default 0.0.
dropout_layer (obj:`ConfigDict`): The dropout_layer used
when adding the shortcut.
Default: None.
use_conv (bool): If True, add 3x3 DWConv between two Linear layers.
Defaults: False.
init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
feedforward_channels,
act_cfg=dict(type='GELU'),
ffn_drop=0.,
dropout_layer=None,
use_conv=False,
init_cfg=None):
super(MixFFN, self).__init__(init_cfg=init_cfg)
self.embed_dims = embed_dims
self.feedforward_channels = feedforward_channels
self.act_cfg = act_cfg
activate = build_activation_layer(act_cfg)
in_channels = embed_dims
fc1 = Conv2d(
in_channels=in_channels,
out_channels=feedforward_channels,
kernel_size=1,
stride=1,
bias=True)
if use_conv:
# 3x3 depth wise conv to provide positional encode information
dw_conv = Conv2d(
in_channels=feedforward_channels,
out_channels=feedforward_channels,
kernel_size=3,
stride=1,
padding=(3 - 1) // 2,
bias=True,
groups=feedforward_channels)
fc2 = Conv2d(
in_channels=feedforward_channels,
out_channels=in_channels,
kernel_size=1,
stride=1,
bias=True)
drop = nn.Dropout(ffn_drop)
layers = [fc1, activate, drop, fc2, drop]
if use_conv:
layers.insert(1, dw_conv)
self.layers = Sequential(*layers)
self.dropout_layer = build_dropout(
dropout_layer) if dropout_layer else torch.nn.Identity()
def forward(self, x, hw_shape, identity=None):
out = nlc_to_nchw(x, hw_shape)
out = self.layers(out)
out = nchw_to_nlc(out)
if identity is None:
identity = x
return identity + self.dropout_layer(out)
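# A minimal usage sketch for ``MixFFN``, assuming a 16x16 token grid flattened
# to (batch, H*W, C); the residual connection keeps the output shape equal to
# the input shape:
# >>> import torch
# >>> ffn = MixFFN(embed_dims=64, feedforward_channels=256, use_conv=True)
# >>> x = torch.rand(1, 16 * 16, 64)
# >>> ffn(x, hw_shape=(16, 16)).shape
# torch.Size([1, 256, 64])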
class SpatialReductionAttention(MultiheadAttention):
"""An implementation of Spatial Reduction Attention of PVT.
This module is modified from MultiheadAttention which is a module from
mmcv.cnn.bricks.transformer.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads.
attn_drop (float): A Dropout layer on attn_output_weights.
Default: 0.0.
proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
Default: 0.0.
dropout_layer (obj:`ConfigDict`): The dropout_layer used
when adding the shortcut. Default: None.
batch_first (bool): Key, Query and Value are shape of
(batch, n, embed_dim)
or (n, batch, embed_dim). Default: False.
qkv_bias (bool): enable bias for qkv if True. Default: True.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
sr_ratio (int): The ratio of spatial reduction of Spatial Reduction
Attention of PVT. Default: 1.
init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
attn_drop=0.,
proj_drop=0.,
dropout_layer=None,
batch_first=True,
qkv_bias=True,
norm_cfg=dict(type='LN'),
sr_ratio=1,
init_cfg=None):
super().__init__(
embed_dims,
num_heads,
attn_drop,
proj_drop,
batch_first=batch_first,
dropout_layer=dropout_layer,
bias=qkv_bias,
init_cfg=init_cfg)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = Conv2d(
in_channels=embed_dims,
out_channels=embed_dims,
kernel_size=sr_ratio,
stride=sr_ratio)
# The ret[0] of build_norm_layer is norm name.
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
# handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa
from mmdet import digit_version, mmcv_version
if mmcv_version < digit_version('1.3.17'):
warnings.warn('The legacy version of forward function in'
'SpatialReductionAttention is deprecated in'
'mmcv>=1.3.17 and will no longer support in the'
'future. Please upgrade your mmcv.')
self.forward = self.legacy_forward
def forward(self, x, hw_shape, identity=None):
x_q = x
if self.sr_ratio > 1:
x_kv = nlc_to_nchw(x, hw_shape)
x_kv = self.sr(x_kv)
x_kv = nchw_to_nlc(x_kv)
x_kv = self.norm(x_kv)
else:
x_kv = x
if identity is None:
identity = x_q
# Because the dataflow('key', 'query', 'value') of
# ``torch.nn.MultiheadAttention`` is (num_queries, batch,
# embed_dims), We should adjust the shape of dataflow from
# batch_first (batch, num_queries, embed_dims) to num_queries_first
# (num_queries ,batch, embed_dims), and recover ``attn_output``
# from num_queries_first to batch_first.
if self.batch_first:
x_q = x_q.transpose(0, 1)
x_kv = x_kv.transpose(0, 1)
out = self.attn(query=x_q, key=x_kv, value=x_kv)[0]
if self.batch_first:
out = out.transpose(0, 1)
return identity + self.dropout_layer(self.proj_drop(out))
def legacy_forward(self, x, hw_shape, identity=None):
"""multi head attention forward in mmcv version < 1.3.17."""
x_q = x
if self.sr_ratio > 1:
x_kv = nlc_to_nchw(x, hw_shape)
x_kv = self.sr(x_kv)
x_kv = nchw_to_nlc(x_kv)
x_kv = self.norm(x_kv)
else:
x_kv = x
if identity is None:
identity = x_q
out = self.attn(query=x_q, key=x_kv, value=x_kv)[0]
return identity + self.dropout_layer(self.proj_drop(out))
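# A minimal usage sketch for ``SpatialReductionAttention``, assuming a 16x16
# token grid; with ``sr_ratio=2`` the key/value sequence is spatially reduced
# to an 8x8 grid before attention, while the query (and output) length is
# unchanged:
# >>> import torch
# >>> attn = SpatialReductionAttention(embed_dims=64, num_heads=2, sr_ratio=2)
# >>> x = torch.rand(1, 16 * 16, 64)
# >>> attn(x, hw_shape=(16, 16)).shape
# torch.Size([1, 256, 64])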
class PVTEncoderLayer(BaseModule):
"""Implements one encoder layer in PVT.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
drop_rate (float): Probability of an element to be zeroed.
after the feed forward layer. Default: 0.0.
attn_drop_rate (float): The drop out rate for attention layer.
Default: 0.0.
drop_path_rate (float): stochastic depth rate. Default: 0.0.
qkv_bias (bool): enable bias for qkv if True.
Default: True.
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
sr_ratio (int): The ratio of spatial reduction of Spatial Reduction
Attention of PVT. Default: 1.
use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN.
Default: False.
init_cfg (dict, optional): Initialization config dict.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
qkv_bias=True,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
sr_ratio=1,
use_conv_ffn=False,
init_cfg=None):
super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg)
# The ret[0] of build_norm_layer is norm name.
self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]
self.attn = SpatialReductionAttention(
embed_dims=embed_dims,
num_heads=num_heads,
attn_drop=attn_drop_rate,
proj_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
qkv_bias=qkv_bias,
norm_cfg=norm_cfg,
sr_ratio=sr_ratio)
# The ret[0] of build_norm_layer is norm name.
self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]
self.ffn = MixFFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
use_conv=use_conv_ffn,
act_cfg=act_cfg)
def forward(self, x, hw_shape):
x = self.attn(self.norm1(x), hw_shape, identity=x)
x = self.ffn(self.norm2(x), hw_shape, identity=x)
return x
class AbsolutePositionEmbedding(BaseModule):
"""An implementation of the absolute position embedding in PVT.
Args:
pos_shape (int): The shape of the absolute position embedding.
pos_dim (int): The dimension of the absolute position embedding.
drop_rate (float): Probability of an element to be zeroed.
Default: 0.0.
"""
def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None):
super().__init__(init_cfg=init_cfg)
if isinstance(pos_shape, int):
pos_shape = to_2tuple(pos_shape)
elif isinstance(pos_shape, tuple):
if len(pos_shape) == 1:
pos_shape = to_2tuple(pos_shape[0])
assert len(pos_shape) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(pos_shape)}'
self.pos_shape = pos_shape
self.pos_dim = pos_dim
self.pos_embed = nn.Parameter(
torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim))
self.drop = nn.Dropout(p=drop_rate)
def init_weights(self):
trunc_normal_(self.pos_embed, std=0.02)
def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'):
"""Resize pos_embed weights.
Resize pos_embed using bilinear interpolate method.
Args:
pos_embed (torch.Tensor): Position embedding weights.
input_shape (tuple): Tuple for (downsampled input image height,
downsampled input image width).
mode (str): Algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'``. Default: ``'bilinear'``.
Return:
torch.Tensor: The resized pos_embed of shape [B, L_new, C].
"""
assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
pos_h, pos_w = self.pos_shape
pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]
pos_embed_weight = pos_embed_weight.reshape(
1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous()
pos_embed_weight = F.interpolate(
pos_embed_weight, size=input_shape, mode=mode)
pos_embed_weight = torch.flatten(pos_embed_weight,
2).transpose(1, 2).contiguous()
pos_embed = pos_embed_weight
return pos_embed
def forward(self, x, hw_shape, mode='bilinear'):
pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode)
return self.drop(x + pos_embed)
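# A minimal usage sketch for ``AbsolutePositionEmbedding``, assuming a
# position embedding learned on a 56x56 grid that is bilinearly resized to
# the 8x8 grid of the current input:
# >>> import torch
# >>> ape = AbsolutePositionEmbedding(pos_shape=56, pos_dim=64)
# >>> x = torch.rand(1, 8 * 8, 64)
# >>> ape(x, hw_shape=(8, 8)).shape
# torch.Size([1, 64, 64])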
@MODELS.register_module()
class PyramidVisionTransformer(BaseModule):
"""Pyramid Vision Transformer (PVT)
Implementation of `Pyramid Vision Transformer: A Versatile Backbone for
Dense Prediction without Convolutions
<https://arxiv.org/pdf/2102.12122.pdf>`_.
Args:
        pretrain_img_size (int | tuple[int]): The size of the input image
            when pretraining. Default: 224.
in_channels (int): Number of input channels. Default: 3.
embed_dims (int): Embedding dimension. Default: 64.
        num_stages (int): The number of stages. Default: 4.
num_layers (Sequence[int]): The layer number of each transformer encode
layer. Default: [3, 4, 6, 3].
num_heads (Sequence[int]): The attention heads of each transformer
encode layer. Default: [1, 2, 5, 8].
patch_sizes (Sequence[int]): The patch_size of each patch embedding.
Default: [4, 2, 2, 2].
strides (Sequence[int]): The stride of each patch embedding.
Default: [4, 2, 2, 2].
paddings (Sequence[int]): The padding of each patch embedding.
Default: [0, 0, 0, 0].
sr_ratios (Sequence[int]): The spatial reduction rate of each
transformer encode layer. Default: [8, 4, 2, 1].
out_indices (Sequence[int] | int): Output from which stages.
Default: (0, 1, 2, 3).
mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the
embedding dim of each transformer encode layer.
Default: [8, 8, 4, 4].
qkv_bias (bool): Enable bias for qkv if True. Default: True.
drop_rate (float): Probability of an element to be zeroed.
Default 0.0.
attn_drop_rate (float): The drop out rate for attention layer.
Default 0.0.
drop_path_rate (float): stochastic depth rate. Default 0.1.
use_abs_pos_embed (bool): If True, add absolute position embedding to
the patch embedding. Defaults: True.
use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN.
Default: False.
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
pretrained (str, optional): model pretrained path. Default: None.
convert_weights (bool): The flag indicates whether the
pre-trained model is from the original repo. We may need
to convert some keys to make it compatible.
Default: True.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
pretrain_img_size=224,
in_channels=3,
embed_dims=64,
num_stages=4,
num_layers=[3, 4, 6, 3],
num_heads=[1, 2, 5, 8],
patch_sizes=[4, 2, 2, 2],
strides=[4, 2, 2, 2],
paddings=[0, 0, 0, 0],
sr_ratios=[8, 4, 2, 1],
out_indices=(0, 1, 2, 3),
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
use_abs_pos_embed=True,
norm_after_stage=False,
use_conv_ffn=False,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN', eps=1e-6),
pretrained=None,
convert_weights=True,
init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.convert_weights = convert_weights
if isinstance(pretrain_img_size, int):
pretrain_img_size = to_2tuple(pretrain_img_size)
elif isinstance(pretrain_img_size, tuple):
if len(pretrain_img_size) == 1:
pretrain_img_size = to_2tuple(pretrain_img_size[0])
assert len(pretrain_img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(pretrain_img_size)}'
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be setting at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
self.init_cfg = init_cfg
else:
raise TypeError('pretrained must be a str or None')
self.embed_dims = embed_dims
self.num_stages = num_stages
self.num_layers = num_layers
self.num_heads = num_heads
self.patch_sizes = patch_sizes
self.strides = strides
self.sr_ratios = sr_ratios
assert num_stages == len(num_layers) == len(num_heads) \
== len(patch_sizes) == len(strides) == len(sr_ratios)
self.out_indices = out_indices
assert max(out_indices) < self.num_stages
self.pretrained = pretrained
# transformer encoder
dpr = [
x.item()
for x in torch.linspace(0, drop_path_rate, sum(num_layers))
] # stochastic num_layer decay rule
cur = 0
self.layers = ModuleList()
for i, num_layer in enumerate(num_layers):
embed_dims_i = embed_dims * num_heads[i]
patch_embed = PatchEmbed(
in_channels=in_channels,
embed_dims=embed_dims_i,
kernel_size=patch_sizes[i],
stride=strides[i],
padding=paddings[i],
bias=True,
norm_cfg=norm_cfg)
layers = ModuleList()
if use_abs_pos_embed:
pos_shape = pretrain_img_size // np.prod(patch_sizes[:i + 1])
pos_embed = AbsolutePositionEmbedding(
pos_shape=pos_shape,
pos_dim=embed_dims_i,
drop_rate=drop_rate)
layers.append(pos_embed)
layers.extend([
PVTEncoderLayer(
embed_dims=embed_dims_i,
num_heads=num_heads[i],
feedforward_channels=mlp_ratios[i] * embed_dims_i,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=dpr[cur + idx],
qkv_bias=qkv_bias,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
sr_ratio=sr_ratios[i],
use_conv_ffn=use_conv_ffn) for idx in range(num_layer)
])
in_channels = embed_dims_i
# The ret[0] of build_norm_layer is norm name.
if norm_after_stage:
norm = build_norm_layer(norm_cfg, embed_dims_i)[1]
else:
norm = nn.Identity()
self.layers.append(ModuleList([patch_embed, layers, norm]))
cur += num_layer
def init_weights(self):
logger = MMLogger.get_current_instance()
if self.init_cfg is None:
logger.warn(f'No pre-trained weights for '
f'{self.__class__.__name__}, '
f'training start from scratch')
for m in self.modules():
if isinstance(m, nn.Linear):
trunc_normal_init(m, std=.02, bias=0.)
elif isinstance(m, nn.LayerNorm):
constant_init(m, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[
1] * m.out_channels
fan_out //= m.groups
normal_init(m, 0, math.sqrt(2.0 / fan_out))
elif isinstance(m, AbsolutePositionEmbedding):
m.init_weights()
else:
assert 'checkpoint' in self.init_cfg, f'Only support ' \
f'specify `Pretrained` in ' \
f'`init_cfg` in ' \
f'{self.__class__.__name__} '
checkpoint = CheckpointLoader.load_checkpoint(
self.init_cfg.checkpoint, logger=logger, map_location='cpu')
logger.warn(f'Load pre-trained model for '
f'{self.__class__.__name__} from original repo')
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
if self.convert_weights:
                # Because pvt backbones are not supported by mmcls,
                # we need to convert the pre-trained weights to match
                # this implementation.
state_dict = pvt_convert(state_dict)
load_state_dict(self, state_dict, strict=False, logger=logger)
def forward(self, x):
outs = []
for i, layer in enumerate(self.layers):
x, hw_shape = layer[0](x)
for block in layer[1]:
x = block(x, hw_shape)
x = layer[2](x)
x = nlc_to_nchw(x, hw_shape)
if i in self.out_indices:
outs.append(x)
return outs
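# A minimal usage sketch for ``PyramidVisionTransformer``, assuming the
# default configuration (embed_dims=64, num_heads=[1, 2, 5, 8] and strides
# [4, 2, 2, 2]), which yields a 4-level feature pyramid with channel widths
# 64/128/320/512 at strides 4/8/16/32:
# >>> import torch
# >>> self = PyramidVisionTransformer()
# >>> self.eval()
# >>> inputs = torch.rand(1, 3, 32, 32)
# >>> level_outputs = self.forward(inputs)
# >>> for level_out in level_outputs:
# ...     print(tuple(level_out.shape))
# (1, 64, 8, 8)
# (1, 128, 4, 4)
# (1, 320, 2, 2)
# (1, 512, 1, 1)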
@MODELS.register_module()
class PyramidVisionTransformerV2(PyramidVisionTransformer):
"""Implementation of `PVTv2: Improved Baselines with Pyramid Vision
Transformer <https://arxiv.org/pdf/2106.13797.pdf>`_."""
def __init__(self, **kwargs):
super(PyramidVisionTransformerV2, self).__init__(
patch_sizes=[7, 3, 3, 3],
paddings=[3, 1, 1, 1],
use_abs_pos_embed=False,
norm_after_stage=True,
use_conv_ffn=True,
**kwargs)
def pvt_convert(ckpt):
new_ckpt = OrderedDict()
# Process the concat between q linear weights and kv linear weights
use_abs_pos_embed = False
use_conv_ffn = False
for k in ckpt.keys():
if k.startswith('pos_embed'):
use_abs_pos_embed = True
if k.find('dwconv') >= 0:
use_conv_ffn = True
for k, v in ckpt.items():
if k.startswith('head'):
continue
if k.startswith('norm.'):
continue
if k.startswith('cls_token'):
continue
if k.startswith('pos_embed'):
stage_i = int(k.replace('pos_embed', ''))
new_k = k.replace(f'pos_embed{stage_i}',
f'layers.{stage_i - 1}.1.0.pos_embed')
if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7
new_v = v[:, 1:, :] # remove cls token
else:
new_v = v
elif k.startswith('patch_embed'):
stage_i = int(k.split('.')[0].replace('patch_embed', ''))
new_k = k.replace(f'patch_embed{stage_i}',
f'layers.{stage_i - 1}.0')
new_v = v
if 'proj.' in new_k:
new_k = new_k.replace('proj.', 'projection.')
elif k.startswith('block'):
stage_i = int(k.split('.')[0].replace('block', ''))
layer_i = int(k.split('.')[1])
new_layer_i = layer_i + use_abs_pos_embed
new_k = k.replace(f'block{stage_i}.{layer_i}',
f'layers.{stage_i - 1}.1.{new_layer_i}')
new_v = v
if 'attn.q.' in new_k:
sub_item_k = k.replace('q.', 'kv.')
new_k = new_k.replace('q.', 'attn.in_proj_')
new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)
elif 'attn.kv.' in new_k:
continue
elif 'attn.proj.' in new_k:
new_k = new_k.replace('proj.', 'attn.out_proj.')
elif 'attn.sr.' in new_k:
new_k = new_k.replace('sr.', 'sr.')
elif 'mlp.' in new_k:
string = f'{new_k}-'
new_k = new_k.replace('mlp.', 'ffn.layers.')
if 'fc1.weight' in new_k or 'fc2.weight' in new_k:
new_v = v.reshape((*v.shape, 1, 1))
new_k = new_k.replace('fc1.', '0.')
new_k = new_k.replace('dwconv.dwconv.', '1.')
if use_conv_ffn:
new_k = new_k.replace('fc2.', '4.')
else:
new_k = new_k.replace('fc2.', '3.')
string += f'{new_k} {v.shape}-{new_v.shape}'
elif k.startswith('norm'):
stage_i = int(k[4])
new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2')
new_v = v
else:
new_k = k
new_v = v
new_ckpt[new_k] = new_v
return new_ckpt
| 26,272 | 38.448949 | 89 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/hrnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule, ModuleList, Sequential
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.registry import MODELS
from .resnet import BasicBlock, Bottleneck
class HRModule(BaseModule):
"""High-Resolution Module for HRNet.
    In this module, every branch has 4 BasicBlocks/Bottlenecks. The fusion and
    exchange of multi-scale features across branches is also performed in this
    module.
"""
def __init__(self,
num_branches,
blocks,
num_blocks,
in_channels,
num_channels,
multiscale_output=True,
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
block_init_cfg=None,
init_cfg=None):
super(HRModule, self).__init__(init_cfg)
self.block_init_cfg = block_init_cfg
self._check_branches(num_branches, num_blocks, in_channels,
num_channels)
self.in_channels = in_channels
self.num_branches = num_branches
self.multiscale_output = multiscale_output
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
self.with_cp = with_cp
self.branches = self._make_branches(num_branches, blocks, num_blocks,
num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(inplace=False)
def _check_branches(self, num_branches, num_blocks, in_channels,
num_channels):
if num_branches != len(num_blocks):
error_msg = f'NUM_BRANCHES({num_branches}) ' \
f'!= NUM_BLOCKS({len(num_blocks)})'
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = f'NUM_BRANCHES({num_branches}) ' \
f'!= NUM_CHANNELS({len(num_channels)})'
raise ValueError(error_msg)
if num_branches != len(in_channels):
error_msg = f'NUM_BRANCHES({num_branches}) ' \
f'!= NUM_INCHANNELS({len(in_channels)})'
raise ValueError(error_msg)
def _make_one_branch(self,
branch_index,
block,
num_blocks,
num_channels,
stride=1):
downsample = None
if stride != 1 or \
self.in_channels[branch_index] != \
num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
build_conv_layer(
self.conv_cfg,
self.in_channels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(self.norm_cfg, num_channels[branch_index] *
block.expansion)[1])
layers = []
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=self.block_init_cfg))
self.in_channels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=self.block_init_cfg))
return Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
in_channels = self.in_channels
fuse_layers = []
num_out_branches = num_branches if self.multiscale_output else 1
for i in range(num_out_branches):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[i],
kernel_size=1,
stride=1,
padding=0,
bias=False),
build_norm_layer(self.norm_cfg, in_channels[i])[1],
nn.Upsample(
scale_factor=2**(j - i), mode='nearest')))
elif j == i:
fuse_layer.append(None)
else:
conv_downsamples = []
for k in range(i - j):
if k == i - j - 1:
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[i],
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
in_channels[i])[1]))
else:
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels[j],
in_channels[j],
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
in_channels[j])[1],
nn.ReLU(inplace=False)))
fuse_layer.append(nn.Sequential(*conv_downsamples))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def forward(self, x):
"""Forward function."""
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = 0
for j in range(self.num_branches):
if i == j:
y += x[j]
else:
y += self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
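# A minimal usage sketch for ``HRModule``, assuming two branches built from
# ``BasicBlock`` at 1/4 and 1/8 resolution; the fuse layers exchange
# information across branches while every output keeps its own resolution:
# >>> import torch
# >>> module = HRModule(
# ...     num_branches=2,
# ...     blocks=BasicBlock,
# ...     num_blocks=(4, 4),
# ...     in_channels=[32, 64],
# ...     num_channels=(32, 64))
# >>> xs = [torch.rand(1, 32, 16, 16), torch.rand(1, 64, 8, 8)]
# >>> outs = module(xs)
# >>> [tuple(out.shape) for out in outs]
# [(1, 32, 16, 16), (1, 64, 8, 8)]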
@MODELS.register_module()
class HRNet(BaseModule):
"""HRNet backbone.
`High-Resolution Representations for Labeling Pixels and Regions
arXiv: <https://arxiv.org/abs/1904.04514>`_.
Args:
extra (dict): Detailed configuration for each stage of HRNet.
There must be 4 stages, the configuration for each stage must have
5 keys:
- num_modules(int): The number of HRModule in this stage.
- num_branches(int): The number of branches in the HRModule.
- block(str): The type of convolution block.
- num_blocks(tuple): The number of blocks in each branch.
The length must be equal to num_branches.
- num_channels(tuple): The number of channels in each branch.
The length must be equal to num_branches.
in_channels (int): Number of input image channels. Default: 3.
conv_cfg (dict): Dictionary to construct and config conv layer.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: True.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: False.
multiscale_output (bool): Whether to output multi-level features
produced by multiple branches. If False, only the first level
feature will be output. Default: True.
pretrained (str, optional): Model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
Example:
>>> from mmdet.models import HRNet
>>> import torch
>>> extra = dict(
>>> stage1=dict(
>>> num_modules=1,
>>> num_branches=1,
>>> block='BOTTLENECK',
>>> num_blocks=(4, ),
>>> num_channels=(64, )),
>>> stage2=dict(
>>> num_modules=1,
>>> num_branches=2,
>>> block='BASIC',
>>> num_blocks=(4, 4),
>>> num_channels=(32, 64)),
>>> stage3=dict(
>>> num_modules=4,
>>> num_branches=3,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4),
>>> num_channels=(32, 64, 128)),
>>> stage4=dict(
>>> num_modules=3,
>>> num_branches=4,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4, 4),
>>> num_channels=(32, 64, 128, 256)))
>>> self = HRNet(extra, in_channels=1)
>>> self.eval()
>>> inputs = torch.rand(1, 1, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 32, 8, 8)
(1, 64, 4, 4)
(1, 128, 2, 2)
(1, 256, 1, 1)
"""
blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
def __init__(self,
extra,
in_channels=3,
conv_cfg=None,
norm_cfg=dict(type='BN'),
norm_eval=True,
with_cp=False,
zero_init_residual=False,
multiscale_output=True,
pretrained=None,
init_cfg=None):
super(HRNet, self).__init__(init_cfg)
self.pretrained = pretrained
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
# Assert configurations of 4 stages are in extra
assert 'stage1' in extra and 'stage2' in extra \
and 'stage3' in extra and 'stage4' in extra
# Assert whether the length of `num_blocks` and `num_channels` are
# equal to `num_branches`
for i in range(4):
cfg = extra[f'stage{i + 1}']
assert len(cfg['num_blocks']) == cfg['num_branches'] and \
len(cfg['num_channels']) == cfg['num_branches']
self.extra = extra
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.zero_init_residual = zero_init_residual
# stem net
self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
64,
kernel_size=3,
stride=2,
padding=1,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
self.conv_cfg,
64,
64,
kernel_size=3,
stride=2,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
# stage 1
self.stage1_cfg = self.extra['stage1']
num_channels = self.stage1_cfg['num_channels'][0]
block_type = self.stage1_cfg['block']
num_blocks = self.stage1_cfg['num_blocks'][0]
block = self.blocks_dict[block_type]
stage1_out_channels = num_channels * block.expansion
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
# stage 2
self.stage2_cfg = self.extra['stage2']
num_channels = self.stage2_cfg['num_channels']
block_type = self.stage2_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition1 = self._make_transition_layer([stage1_out_channels],
num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
# stage 3
self.stage3_cfg = self.extra['stage3']
num_channels = self.stage3_cfg['num_channels']
block_type = self.stage3_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition2 = self._make_transition_layer(pre_stage_channels,
num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
# stage 4
self.stage4_cfg = self.extra['stage4']
num_channels = self.stage4_cfg['num_channels']
block_type = self.stage4_cfg['block']
block = self.blocks_dict[block_type]
num_channels = [channel * block.expansion for channel in num_channels]
self.transition3 = self._make_transition_layer(pre_stage_channels,
num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multiscale_output=multiscale_output)
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: the normalization layer named "norm2" """
return getattr(self, self.norm2_name)
def _make_transition_layer(self, num_channels_pre_layer,
num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
num_channels_pre_layer[i],
num_channels_cur_layer[i],
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg,
num_channels_cur_layer[i])[1],
nn.ReLU(inplace=True)))
else:
transition_layers.append(None)
else:
conv_downsamples = []
for j in range(i + 1 - num_branches_pre):
in_channels = num_channels_pre_layer[-1]
out_channels = num_channels_cur_layer[i] \
if j == i - num_branches_pre else in_channels
conv_downsamples.append(
nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels,
out_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, out_channels)[1],
nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv_downsamples))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
build_conv_layer(
self.conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
build_norm_layer(self.norm_cfg, planes * block.expansion)[1])
layers = []
block_init_cfg = None
if self.pretrained is None and not hasattr(
self, 'init_cfg') and self.zero_init_residual:
if block is BasicBlock:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm2'))
elif block is Bottleneck:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm3'))
layers.append(
block(
inplanes,
planes,
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=block_init_cfg,
))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
init_cfg=block_init_cfg))
return Sequential(*layers)
def _make_stage(self, layer_config, in_channels, multiscale_output=True):
num_modules = layer_config['num_modules']
num_branches = layer_config['num_branches']
num_blocks = layer_config['num_blocks']
num_channels = layer_config['num_channels']
block = self.blocks_dict[layer_config['block']]
hr_modules = []
block_init_cfg = None
if self.pretrained is None and not hasattr(
self, 'init_cfg') and self.zero_init_residual:
if block is BasicBlock:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm2'))
elif block is Bottleneck:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm3'))
for i in range(num_modules):
# multi_scale_output is only used for the last module
if not multiscale_output and i == num_modules - 1:
reset_multiscale_output = False
else:
reset_multiscale_output = True
hr_modules.append(
HRModule(
num_branches,
block,
num_blocks,
in_channels,
num_channels,
reset_multiscale_output,
with_cp=self.with_cp,
norm_cfg=self.norm_cfg,
conv_cfg=self.conv_cfg,
block_init_cfg=block_init_cfg))
return Sequential(*hr_modules), in_channels
def forward(self, x):
"""Forward function."""
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['num_branches']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['num_branches']):
if self.transition2[i] is not None:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['num_branches']):
if self.transition3[i] is not None:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage4(x_list)
return y_list
def train(self, mode=True):
"""Convert the model into training mode will keeping the normalization
layer freezed."""
super(HRNet, self).train(mode)
if mode and self.norm_eval:
for m in self.modules():
                # trick: eval() only has an effect on BatchNorm layers
if isinstance(m, _BatchNorm):
m.eval()
| 23,108 | 38.167797 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/regnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmdet.registry import MODELS
from .resnet import ResNet
from .resnext import Bottleneck
@MODELS.register_module()
class RegNet(ResNet):
"""RegNet backbone.
More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ .
Args:
arch (dict): The parameter of RegNets.
- w0 (int): initial width
- wa (float): slope of width
- wm (float): quantization parameter to quantize the width
- depth (int): depth of the backbone
- group_w (int): width of group
- bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
strides (Sequence[int]): Strides of the first block of each stage.
base_channels (int): Base channels after stem layer.
in_channels (int): Number of input image channels. Default: 3.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
norm_cfg (dict): dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): whether to use zero init for last norm layer
in resblocks to let them behave as identity.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import RegNet
>>> import torch
>>> self = RegNet(
arch=dict(
w0=88,
wa=26.31,
wm=2.25,
group_w=48,
depth=25,
bot_mul=1.0))
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 96, 8, 8)
(1, 192, 4, 4)
(1, 432, 2, 2)
(1, 1008, 1, 1)
"""
arch_settings = {
'regnetx_400mf':
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
'regnetx_800mf':
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),
'regnetx_1.6gf':
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),
'regnetx_3.2gf':
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),
'regnetx_4.0gf':
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),
'regnetx_6.4gf':
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),
'regnetx_8.0gf':
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),
'regnetx_12gf':
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),
}
def __init__(self,
arch,
in_channels=3,
stem_channels=32,
base_channels=32,
strides=(2, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
deep_stem=False,
avg_down=False,
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
dcn=None,
stage_with_dcn=(False, False, False, False),
plugins=None,
with_cp=False,
zero_init_residual=True,
pretrained=None,
init_cfg=None):
super(ResNet, self).__init__(init_cfg)
# Generate RegNet parameters first
if isinstance(arch, str):
assert arch in self.arch_settings, \
f'"arch": "{arch}" is not one of the' \
' arch_settings'
arch = self.arch_settings[arch]
elif not isinstance(arch, dict):
raise ValueError('Expect "arch" to be either a string '
f'or a dict, got {type(arch)}')
widths, num_stages = self.generate_regnet(
arch['w0'],
arch['wa'],
arch['wm'],
arch['depth'],
)
# Convert to per stage format
stage_widths, stage_blocks = self.get_stages_from_blocks(widths)
# Generate group widths and bot muls
group_widths = [arch['group_w'] for _ in range(num_stages)]
self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]
# Adjust the compatibility of stage_widths and group_widths
stage_widths, group_widths = self.adjust_width_group(
stage_widths, self.bottleneck_ratio, group_widths)
# Group params by stage
self.stage_widths = stage_widths
self.group_widths = group_widths
self.depth = sum(stage_blocks)
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.dcn = dcn
self.stage_with_dcn = stage_with_dcn
if dcn is not None:
assert len(stage_with_dcn) == num_stages
self.plugins = plugins
self.zero_init_residual = zero_init_residual
self.block = Bottleneck
expansion_bak = self.block.expansion
self.block.expansion = 1
self.stage_blocks = stage_blocks[:num_stages]
self._make_stem_layer(in_channels, stem_channels)
block_init_cfg = None
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
if self.zero_init_residual:
block_init_cfg = dict(
type='Constant', val=0, override=dict(name='norm3'))
else:
raise TypeError('pretrained must be a str or None')
self.inplanes = stem_channels
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = self.strides[i]
dilation = self.dilations[i]
group_width = self.group_widths[i]
width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i]))
stage_groups = width // group_width
dcn = self.dcn if self.stage_with_dcn[i] else None
if self.plugins is not None:
stage_plugins = self.make_stage_plugins(self.plugins, i)
else:
stage_plugins = None
res_layer = self.make_res_layer(
block=self.block,
inplanes=self.inplanes,
planes=self.stage_widths[i],
num_blocks=num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=self.with_cp,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
dcn=dcn,
plugins=stage_plugins,
groups=stage_groups,
base_width=group_width,
base_channels=self.stage_widths[i],
init_cfg=block_init_cfg)
self.inplanes = self.stage_widths[i]
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = stage_widths[-1]
self.block.expansion = expansion_bak
def _make_stem_layer(self, in_channels, base_channels):
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
base_channels,
kernel_size=3,
stride=2,
padding=1,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, base_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
def generate_regnet(self,
initial_width,
width_slope,
width_parameter,
depth,
divisor=8):
"""Generates per block width from RegNet parameters.
Args:
initial_width ([int]): Initial width of the backbone
width_slope ([float]): Slope of the quantized linear function
            width_parameter ([float]): Parameter used to quantize the width.
depth ([int]): Depth of the backbone.
divisor (int, optional): The divisor of channels. Defaults to 8.
Returns:
list, int: return a list of widths of each stage and the number \
of stages
"""
assert width_slope >= 0
assert initial_width > 0
assert width_parameter > 1
assert initial_width % divisor == 0
widths_cont = np.arange(depth) * width_slope + initial_width
ks = np.round(
np.log(widths_cont / initial_width) / np.log(width_parameter))
widths = initial_width * np.power(width_parameter, ks)
widths = np.round(np.divide(widths, divisor)) * divisor
num_stages = len(np.unique(widths))
widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
return widths, num_stages
@staticmethod
def quantize_float(number, divisor):
"""Converts a float to closest non-zero int divisible by divisor.
Args:
number (float): Original number to be quantized.
divisor (int): Divisor used to quantize the number.
Returns:
int: Quantized number that is divisible by divisor.
"""
return int(round(number / divisor) * divisor)
def adjust_width_group(self, widths, bottleneck_ratio, groups):
"""Adjusts the compatibility of widths and groups.
Args:
widths (list[int]): Width of each stage.
bottleneck_ratio (float): Bottleneck ratio.
groups (int): number of groups in each stage
Returns:
tuple(list): The adjusted widths and groups of each stage.
"""
bottleneck_width = [
int(w * b) for w, b in zip(widths, bottleneck_ratio)
]
groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)]
bottleneck_width = [
self.quantize_float(w_bot, g)
for w_bot, g in zip(bottleneck_width, groups)
]
widths = [
int(w_bot / b)
for w_bot, b in zip(bottleneck_width, bottleneck_ratio)
]
return widths, groups
def get_stages_from_blocks(self, widths):
"""Gets widths/stage_blocks of network at each stage.
Args:
widths (list[int]): Width in each stage.
Returns:
tuple(list): width and depth of each stage
"""
width_diff = [
width != width_prev
for width, width_prev in zip(widths + [0], [0] + widths)
]
stage_widths = [
width for width, diff in zip(widths, width_diff[:-1]) if diff
]
stage_blocks = np.diff([
depth for depth, diff in zip(range(len(width_diff)), width_diff)
if diff
]).tolist()
return stage_widths, stage_blocks
def forward(self, x):
"""Forward function."""
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
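# --- Illustrative sketch, not part of the original file ---------------------
# The helper below re-runs the quantized linear width schedule implemented in
# `generate_regnet` above. The default arguments (w0=24, wa=24.48, wm=2.54,
# depth=22, divisor=8) are only an assumed RegNetX-400MF-style example.
def _regnet_width_schedule_demo(initial_width=24, width_slope=24.48,
                                width_parameter=2.54, depth=22, divisor=8):
    import numpy as np
    # Continuous widths grow linearly with the block index ...
    widths_cont = np.arange(depth) * width_slope + initial_width
    # ... are snapped to powers of `width_parameter` ...
    ks = np.round(
        np.log(widths_cont / initial_width) / np.log(width_parameter))
    widths = initial_width * np.power(width_parameter, ks)
    # ... and finally rounded to multiples of `divisor`.
    widths = (np.round(widths / divisor) * divisor).astype(int).tolist()
    return widths, len(set(widths))  # per-block widths, number of stages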
| 13,604 | 37.109244 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/mobilenet_v2.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.registry import MODELS
from ..layers import InvertedResidual
from ..utils import make_divisible
@MODELS.register_module()
class MobileNetV2(BaseModule):
"""MobileNetV2 backbone.
Args:
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (Sequence[int], optional): Output from which stages.
Default: (1, 2, 4, 7).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
# Parameters to build layers. 4 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks, stride.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
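    # For example, the second row [6, 24, 2, 2] describes a stage of two
    # InvertedResidual blocks with expand_ratio 6 and 24 output channels,
    # where only the first block uses stride 2 (see `make_layer` below).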
def __init__(self,
widen_factor=1.,
out_indices=(1, 2, 4, 7),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
norm_eval=False,
with_cp=False,
pretrained=None,
init_cfg=None):
super(MobileNetV2, self).__init__(init_cfg)
self.pretrained = pretrained
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
self.widen_factor = widen_factor
self.out_indices = out_indices
if not set(out_indices).issubset(set(range(0, 8))):
raise ValueError('out_indices must be a subset of range'
f'(0, 8). But received {out_indices}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Default: 6.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
mid_channels=int(round(self.in_channels * expand_ratio)),
stride=stride,
with_expand_conv=expand_ratio != 1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def forward(self, x):
"""Forward function."""
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self, mode=True):
"""Convert the model into training mode while keep normalization layer
frozen."""
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
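# --- Illustrative usage sketch, not part of the original file ---------------
# With widen_factor=1.0 and the default out_indices=(1, 2, 4, 7), an assumed
# 224x224 input yields four feature maps at strides 4/8/16/32 with
# 24/32/96/1280 channels respectively.
if __name__ == '__main__':
    import torch
    model = MobileNetV2(out_indices=(1, 2, 4, 7))
    model.eval()
    with torch.no_grad():
        feats = model(torch.rand(1, 3, 224, 224))
    print([tuple(f.shape) for f in feats])
    # expected: [(1, 24, 56, 56), (1, 32, 28, 28), (1, 96, 14, 14),
    #            (1, 1280, 7, 7)]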
| 7,621 | 37.301508 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/swin.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_norm_layer
from mmcv.cnn.bricks.transformer import FFN, build_dropout
from mmengine.logging import MMLogger
from mmengine.model import BaseModule, ModuleList
from mmengine.model.weight_init import (constant_init, trunc_normal_,
trunc_normal_init)
from mmengine.runner.checkpoint import CheckpointLoader
from mmengine.utils import to_2tuple
from mmdet.registry import MODELS
from ..layers import PatchEmbed, PatchMerging
class WindowMSA(BaseModule):
"""Window based multi-head self-attention (W-MSA) module with relative
position bias.
Args:
embed_dims (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (tuple[int]): The height and width of the window.
qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
Default: True.
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Default: None.
attn_drop_rate (float, optional): Dropout ratio of attention weight.
Default: 0.0
proj_drop_rate (float, optional): Dropout ratio of output. Default: 0.
init_cfg (dict | None, optional): The Config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
window_size,
qkv_bias=True,
qk_scale=None,
attn_drop_rate=0.,
proj_drop_rate=0.,
init_cfg=None):
super().__init__()
self.embed_dims = embed_dims
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_embed_dims = embed_dims // num_heads
self.scale = qk_scale or head_embed_dims**-0.5
self.init_cfg = init_cfg
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),
num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# About 2x faster than original impl
Wh, Ww = self.window_size
rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)
rel_position_index = rel_index_coords + rel_index_coords.T
rel_position_index = rel_position_index.flip(1).contiguous()
self.register_buffer('relative_position_index', rel_position_index)
self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop_rate)
self.proj = nn.Linear(embed_dims, embed_dims)
self.proj_drop = nn.Dropout(proj_drop_rate)
self.softmax = nn.Softmax(dim=-1)
def init_weights(self):
trunc_normal_(self.relative_position_bias_table, std=0.02)
def forward(self, x, mask=None):
"""
Args:
x (tensor): input features with shape of (num_windows*B, N, C)
mask (tensor | None, Optional): mask with shape of (num_windows,
Wh*Ww, Wh*Ww), value should be between (-inf, 0].
"""
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads,
C // self.num_heads).permute(2, 0, 3, 1, 4)
# make torchscript happy (cannot use tensor as tuple)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1],
self.window_size[0] * self.window_size[1],
-1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B // nW, nW, self.num_heads, N,
N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
@staticmethod
def double_step_seq(step1, len1, step2, len2):
seq1 = torch.arange(0, step1 * len1, step1)
seq2 = torch.arange(0, step2 * len2, step2)
return (seq1[:, None] + seq2[None, :]).reshape(1, -1)
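# Illustrative sketch, not part of the original file: for an assumed 2x2
# window (Wh = Ww = 2), the relative-position index seed is
# double_step_seq(2 * Ww - 1, Wh, 1, Ww) = double_step_seq(3, 2, 1, 2),
# i.e. tensor([[0, 1, 3, 4]]).
def _double_step_seq_demo():
    return WindowMSA.double_step_seq(3, 2, 1, 2)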
class ShiftWindowMSA(BaseModule):
"""Shifted Window Multihead Self-Attention Module.
Args:
embed_dims (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): The height and width of the window.
shift_size (int, optional): The shift step of each window towards
right-bottom. If zero, act as regular window-msa. Defaults to 0.
qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
Default: True
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Defaults: None.
attn_drop_rate (float, optional): Dropout ratio of attention weight.
Defaults: 0.
proj_drop_rate (float, optional): Dropout ratio of output.
Defaults: 0.
dropout_layer (dict, optional): The dropout_layer used before output.
Defaults: dict(type='DropPath', drop_prob=0.).
init_cfg (dict, optional): The extra config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
window_size,
shift_size=0,
qkv_bias=True,
qk_scale=None,
attn_drop_rate=0,
proj_drop_rate=0,
dropout_layer=dict(type='DropPath', drop_prob=0.),
init_cfg=None):
super().__init__(init_cfg)
self.window_size = window_size
self.shift_size = shift_size
assert 0 <= self.shift_size < self.window_size
self.w_msa = WindowMSA(
embed_dims=embed_dims,
num_heads=num_heads,
window_size=to_2tuple(window_size),
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop_rate=attn_drop_rate,
proj_drop_rate=proj_drop_rate,
init_cfg=None)
self.drop = build_dropout(dropout_layer)
def forward(self, query, hw_shape):
B, L, C = query.shape
H, W = hw_shape
assert L == H * W, 'input feature has wrong size'
query = query.view(B, H, W, C)
# pad feature maps to multiples of window size
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b))
H_pad, W_pad = query.shape[1], query.shape[2]
# cyclic shift
if self.shift_size > 0:
shifted_query = torch.roll(
query,
shifts=(-self.shift_size, -self.shift_size),
dims=(1, 2))
# calculate attention mask for SW-MSA
img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device)
h_slices = (slice(0, -self.window_size),
slice(-self.window_size,
-self.shift_size), slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size,
-self.shift_size), slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
# nW, window_size, window_size, 1
mask_windows = self.window_partition(img_mask)
mask_windows = mask_windows.view(
-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0,
float(-100.0)).masked_fill(
attn_mask == 0, float(0.0))
else:
shifted_query = query
attn_mask = None
# nW*B, window_size, window_size, C
query_windows = self.window_partition(shifted_query)
# nW*B, window_size*window_size, C
query_windows = query_windows.view(-1, self.window_size**2, C)
# W-MSA/SW-MSA (nW*B, window_size*window_size, C)
attn_windows = self.w_msa(query_windows, mask=attn_mask)
# merge windows
attn_windows = attn_windows.view(-1, self.window_size,
self.window_size, C)
# B H' W' C
shifted_x = self.window_reverse(attn_windows, H_pad, W_pad)
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(
shifted_x,
shifts=(self.shift_size, self.shift_size),
dims=(1, 2))
else:
x = shifted_x
if pad_r > 0 or pad_b:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
x = self.drop(x)
return x
def window_reverse(self, windows, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
window_size = self.window_size
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size,
window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
def window_partition(self, x):
"""
Args:
x: (B, H, W, C)
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
window_size = self.window_size
x = x.view(B, H // window_size, window_size, W // window_size,
window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()
windows = windows.view(-1, window_size, window_size, C)
return windows
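# Illustrative sketch, not part of the original file: window_partition and
# window_reverse above are exact inverses when H and W are multiples of the
# window size, as the assumed toy sizes below check.
def _window_round_trip_demo(window_size=7, H=14, W=14, embed_dims=8):
    import torch
    msa = ShiftWindowMSA(
        embed_dims=embed_dims, num_heads=2, window_size=window_size)
    x = torch.rand(2, H, W, embed_dims)
    windows = msa.window_partition(x)           # (2 * 4, 7, 7, embed_dims)
    restored = msa.window_reverse(windows, H, W)
    return torch.allclose(x, restored)          # True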
class SwinBlock(BaseModule):
""""
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
window_size (int, optional): The local window scale. Default: 7.
shift (bool, optional): whether to shift window or not. Default False.
qkv_bias (bool, optional): enable bias for qkv if True. Default: True.
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Default: None.
drop_rate (float, optional): Dropout rate. Default: 0.
attn_drop_rate (float, optional): Attention dropout rate. Default: 0.
drop_path_rate (float, optional): Stochastic depth rate. Default: 0.
act_cfg (dict, optional): The config dict of activation function.
Default: dict(type='GELU').
norm_cfg (dict, optional): The config dict of normalization.
Default: dict(type='LN').
with_cp (bool, optional): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
init_cfg (dict | list | None, optional): The init config.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
window_size=7,
shift=False,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
with_cp=False,
init_cfg=None):
super(SwinBlock, self).__init__()
self.init_cfg = init_cfg
self.with_cp = with_cp
self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]
self.attn = ShiftWindowMSA(
embed_dims=embed_dims,
num_heads=num_heads,
window_size=window_size,
shift_size=window_size // 2 if shift else 0,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop_rate=attn_drop_rate,
proj_drop_rate=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
init_cfg=None)
self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]
self.ffn = FFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
num_fcs=2,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
act_cfg=act_cfg,
add_identity=True,
init_cfg=None)
def forward(self, x, hw_shape):
def _inner_forward(x):
identity = x
x = self.norm1(x)
x = self.attn(x, hw_shape)
x = x + identity
identity = x
x = self.norm2(x)
x = self.ffn(x, identity=identity)
return x
if self.with_cp and x.requires_grad:
x = cp.checkpoint(_inner_forward, x)
else:
x = _inner_forward(x)
return x
class SwinBlockSequence(BaseModule):
"""Implements one stage in Swin Transformer.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
depth (int): The number of blocks in this stage.
window_size (int, optional): The local window scale. Default: 7.
qkv_bias (bool, optional): enable bias for qkv if True. Default: True.
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Default: None.
drop_rate (float, optional): Dropout rate. Default: 0.
attn_drop_rate (float, optional): Attention dropout rate. Default: 0.
drop_path_rate (float | list[float], optional): Stochastic depth
rate. Default: 0.
downsample (BaseModule | None, optional): The downsample operation
module. Default: None.
act_cfg (dict, optional): The config dict of activation function.
Default: dict(type='GELU').
norm_cfg (dict, optional): The config dict of normalization.
Default: dict(type='LN').
with_cp (bool, optional): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
init_cfg (dict | list | None, optional): The init config.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
depth,
window_size=7,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
downsample=None,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
with_cp=False,
init_cfg=None):
super().__init__(init_cfg=init_cfg)
if isinstance(drop_path_rate, list):
drop_path_rates = drop_path_rate
assert len(drop_path_rates) == depth
else:
drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)]
self.blocks = ModuleList()
for i in range(depth):
block = SwinBlock(
embed_dims=embed_dims,
num_heads=num_heads,
feedforward_channels=feedforward_channels,
window_size=window_size,
shift=False if i % 2 == 0 else True,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=drop_path_rates[i],
act_cfg=act_cfg,
norm_cfg=norm_cfg,
with_cp=with_cp,
init_cfg=None)
self.blocks.append(block)
self.downsample = downsample
def forward(self, x, hw_shape):
for block in self.blocks:
x = block(x, hw_shape)
if self.downsample:
x_down, down_hw_shape = self.downsample(x, hw_shape)
return x_down, down_hw_shape, x, hw_shape
else:
return x, hw_shape, x, hw_shape
@MODELS.register_module()
class SwinTransformer(BaseModule):
""" Swin Transformer
A PyTorch implement of : `Swin Transformer:
Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/abs/2103.14030
Inspiration from
https://github.com/microsoft/Swin-Transformer
Args:
pretrain_img_size (int | tuple[int]): The size of input image when
pretrain. Defaults: 224.
in_channels (int): The num of input channels.
Defaults: 3.
embed_dims (int): The feature dimension. Default: 96.
patch_size (int | tuple[int]): Patch size. Default: 4.
window_size (int): Window size. Default: 7.
mlp_ratio (int): Ratio of mlp hidden dim to embedding dim.
Default: 4.
depths (tuple[int]): Depths of each Swin Transformer stage.
Default: (2, 2, 6, 2).
num_heads (tuple[int]): Parallel attention heads of each Swin
Transformer stage. Default: (3, 6, 12, 24).
strides (tuple[int]): The patch merging or patch embedding stride of
each Swin Transformer stage. (In swin, we set kernel size equal to
stride.) Default: (4, 2, 2, 2).
out_indices (tuple[int]): Output from which stages.
Default: (0, 1, 2, 3).
qkv_bias (bool, optional): If True, add a learnable bias to query, key,
value. Default: True
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Default: None.
patch_norm (bool): If add a norm layer for patch embed and patch
merging. Default: True.
drop_rate (float): Dropout rate. Defaults: 0.
attn_drop_rate (float): Attention dropout rate. Default: 0.
drop_path_rate (float): Stochastic depth rate. Defaults: 0.1.
use_abs_pos_embed (bool): If True, add absolute position embedding to
the patch embedding. Defaults: False.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer at
            output of backbone. Defaults: dict(type='LN').
with_cp (bool, optional): Use checkpoint or not. Using checkpoint
will save some memory while slowing down the training speed.
Default: False.
pretrained (str, optional): model pretrained path. Default: None.
convert_weights (bool): The flag indicates whether the
pre-trained model is from the original repo. We may need
to convert some keys to make it compatible.
Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
Default: -1 (-1 means not freezing any parameters).
init_cfg (dict, optional): The Config for initialization.
Defaults to None.
"""
def __init__(self,
pretrain_img_size=224,
in_channels=3,
embed_dims=96,
patch_size=4,
window_size=7,
mlp_ratio=4,
depths=(2, 2, 6, 2),
num_heads=(3, 6, 12, 24),
strides=(4, 2, 2, 2),
out_indices=(0, 1, 2, 3),
qkv_bias=True,
qk_scale=None,
patch_norm=True,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.1,
use_abs_pos_embed=False,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
with_cp=False,
pretrained=None,
convert_weights=False,
frozen_stages=-1,
init_cfg=None):
self.convert_weights = convert_weights
self.frozen_stages = frozen_stages
if isinstance(pretrain_img_size, int):
pretrain_img_size = to_2tuple(pretrain_img_size)
elif isinstance(pretrain_img_size, tuple):
if len(pretrain_img_size) == 1:
pretrain_img_size = to_2tuple(pretrain_img_size[0])
assert len(pretrain_img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(pretrain_img_size)}'
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
self.init_cfg = init_cfg
else:
raise TypeError('pretrained must be a str or None')
super(SwinTransformer, self).__init__(init_cfg=init_cfg)
num_layers = len(depths)
self.out_indices = out_indices
self.use_abs_pos_embed = use_abs_pos_embed
assert strides[0] == patch_size, 'Use non-overlapping patch embed.'
self.patch_embed = PatchEmbed(
in_channels=in_channels,
embed_dims=embed_dims,
conv_type='Conv2d',
kernel_size=patch_size,
stride=strides[0],
norm_cfg=norm_cfg if patch_norm else None,
init_cfg=None)
if self.use_abs_pos_embed:
patch_row = pretrain_img_size[0] // patch_size
patch_col = pretrain_img_size[1] // patch_size
num_patches = patch_row * patch_col
self.absolute_pos_embed = nn.Parameter(
torch.zeros((1, num_patches, embed_dims)))
self.drop_after_pos = nn.Dropout(p=drop_rate)
# set stochastic depth decay rule
total_depth = sum(depths)
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, total_depth)
]
self.stages = ModuleList()
in_channels = embed_dims
for i in range(num_layers):
if i < num_layers - 1:
downsample = PatchMerging(
in_channels=in_channels,
out_channels=2 * in_channels,
stride=strides[i + 1],
norm_cfg=norm_cfg if patch_norm else None,
init_cfg=None)
else:
downsample = None
stage = SwinBlockSequence(
embed_dims=in_channels,
num_heads=num_heads[i],
feedforward_channels=mlp_ratio * in_channels,
depth=depths[i],
window_size=window_size,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])],
downsample=downsample,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
with_cp=with_cp,
init_cfg=None)
self.stages.append(stage)
if downsample:
in_channels = downsample.out_channels
self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)]
# Add a norm layer for each output
for i in out_indices:
layer = build_norm_layer(norm_cfg, self.num_features[i])[1]
layer_name = f'norm{i}'
self.add_module(layer_name, layer)
def train(self, mode=True):
"""Convert the model into training mode while keep layers freezed."""
super(SwinTransformer, self).train(mode)
self._freeze_stages()
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.use_abs_pos_embed:
self.absolute_pos_embed.requires_grad = False
self.drop_after_pos.eval()
for i in range(1, self.frozen_stages + 1):
if (i - 1) in self.out_indices:
norm_layer = getattr(self, f'norm{i-1}')
norm_layer.eval()
for param in norm_layer.parameters():
param.requires_grad = False
m = self.stages[i - 1]
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
logger = MMLogger.get_current_instance()
if self.init_cfg is None:
logger.warn(f'No pre-trained weights for '
f'{self.__class__.__name__}, '
f'training start from scratch')
if self.use_abs_pos_embed:
trunc_normal_(self.absolute_pos_embed, std=0.02)
for m in self.modules():
if isinstance(m, nn.Linear):
trunc_normal_init(m, std=.02, bias=0.)
elif isinstance(m, nn.LayerNorm):
constant_init(m, 1.0)
else:
assert 'checkpoint' in self.init_cfg, f'Only support ' \
f'specify `Pretrained` in ' \
f'`init_cfg` in ' \
f'{self.__class__.__name__} '
ckpt = CheckpointLoader.load_checkpoint(
self.init_cfg.checkpoint, logger=logger, map_location='cpu')
if 'state_dict' in ckpt:
_state_dict = ckpt['state_dict']
elif 'model' in ckpt:
_state_dict = ckpt['model']
else:
_state_dict = ckpt
if self.convert_weights:
                # support loading weights from the original repo
_state_dict = swin_converter(_state_dict)
state_dict = OrderedDict()
for k, v in _state_dict.items():
if k.startswith('backbone.'):
state_dict[k[9:]] = v
# strip prefix of state_dict
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# reshape absolute position embedding
if state_dict.get('absolute_pos_embed') is not None:
absolute_pos_embed = state_dict['absolute_pos_embed']
N1, L, C1 = absolute_pos_embed.size()
N2, C2, H, W = self.absolute_pos_embed.size()
if N1 != N2 or C1 != C2 or L != H * W:
logger.warning('Error in loading absolute_pos_embed, pass')
else:
state_dict['absolute_pos_embed'] = absolute_pos_embed.view(
N2, H, W, C2).permute(0, 3, 1, 2).contiguous()
# interpolate position bias table if needed
relative_position_bias_table_keys = [
k for k in state_dict.keys()
if 'relative_position_bias_table' in k
]
for table_key in relative_position_bias_table_keys:
table_pretrained = state_dict[table_key]
table_current = self.state_dict()[table_key]
L1, nH1 = table_pretrained.size()
L2, nH2 = table_current.size()
if nH1 != nH2:
logger.warning(f'Error in loading {table_key}, pass')
elif L1 != L2:
S1 = int(L1**0.5)
S2 = int(L2**0.5)
table_pretrained_resized = F.interpolate(
table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1),
size=(S2, S2),
mode='bicubic')
state_dict[table_key] = table_pretrained_resized.view(
nH2, L2).permute(1, 0).contiguous()
# load state_dict
self.load_state_dict(state_dict, False)
def forward(self, x):
x, hw_shape = self.patch_embed(x)
if self.use_abs_pos_embed:
x = x + self.absolute_pos_embed
x = self.drop_after_pos(x)
outs = []
for i, stage in enumerate(self.stages):
x, hw_shape, out, out_hw_shape = stage(x, hw_shape)
if i in self.out_indices:
norm_layer = getattr(self, f'norm{i}')
out = norm_layer(out)
out = out.view(-1, *out_hw_shape,
self.num_features[i]).permute(0, 3, 1,
2).contiguous()
outs.append(out)
return outs
def swin_converter(ckpt):
new_ckpt = OrderedDict()
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1,
2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
for k, v in ckpt.items():
if k.startswith('head'):
continue
elif k.startswith('layers'):
new_v = v
if 'attn.' in k:
new_k = k.replace('attn.', 'attn.w_msa.')
elif 'mlp.' in k:
if 'mlp.fc1.' in k:
new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')
elif 'mlp.fc2.' in k:
new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')
else:
new_k = k.replace('mlp.', 'ffn.')
elif 'downsample' in k:
new_k = k
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
else:
new_k = k
new_k = new_k.replace('layers', 'stages', 1)
elif k.startswith('patch_embed'):
new_v = v
if 'proj' in k:
new_k = k.replace('proj', 'projection')
else:
new_k = k
else:
new_v = v
new_k = k
new_ckpt['backbone.' + new_k] = new_v
return new_ckpt
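# Illustrative sketch, not part of the original file: swin_converter above
# only remaps keys (and reorders PatchMerging weights), e.g. the assumed
# official-checkpoint key below becomes
# 'backbone.stages.0.blocks.0.attn.w_msa.qkv.weight'.
def _swin_converter_demo():
    import torch
    ckpt = {'layers.0.blocks.0.attn.qkv.weight': torch.zeros(3, 1)}
    return list(swin_converter(ckpt).keys())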
| 31,958 | 37.97439 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/trident_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule
from torch.nn.modules.utils import _pair
from mmdet.models.backbones.resnet import Bottleneck, ResNet
from mmdet.registry import MODELS
class TridentConv(BaseModule):
"""Trident Convolution Module.
Args:
in_channels (int): Number of channels in input.
out_channels (int): Number of channels in output.
kernel_size (int): Size of convolution kernel.
stride (int, optional): Convolution stride. Default: 1.
trident_dilations (tuple[int, int, int], optional): Dilations of
different trident branch. Default: (1, 2, 3).
test_branch_idx (int, optional): In inference, all 3 branches will
be used if `test_branch_idx==-1`, otherwise only branch with
index `test_branch_idx` will be used. Default: 1.
bias (bool, optional): Whether to use bias in convolution or not.
Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
trident_dilations=(1, 2, 3),
test_branch_idx=1,
bias=False,
init_cfg=None):
super(TridentConv, self).__init__(init_cfg)
self.num_branch = len(trident_dilations)
self.with_bias = bias
self.test_branch_idx = test_branch_idx
self.stride = _pair(stride)
self.kernel_size = _pair(kernel_size)
self.paddings = _pair(trident_dilations)
self.dilations = trident_dilations
self.in_channels = in_channels
self.out_channels = out_channels
self.bias = bias
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels, *self.kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.bias = None
def extra_repr(self):
tmpstr = f'in_channels={self.in_channels}'
tmpstr += f', out_channels={self.out_channels}'
tmpstr += f', kernel_size={self.kernel_size}'
tmpstr += f', num_branch={self.num_branch}'
tmpstr += f', test_branch_idx={self.test_branch_idx}'
tmpstr += f', stride={self.stride}'
tmpstr += f', paddings={self.paddings}'
tmpstr += f', dilations={self.dilations}'
tmpstr += f', bias={self.bias}'
return tmpstr
def forward(self, inputs):
if self.training or self.test_branch_idx == -1:
outputs = [
F.conv2d(input, self.weight, self.bias, self.stride, padding,
dilation) for input, dilation, padding in zip(
inputs, self.dilations, self.paddings)
]
else:
assert len(inputs) == 1
outputs = [
F.conv2d(inputs[0], self.weight, self.bias, self.stride,
self.paddings[self.test_branch_idx],
self.dilations[self.test_branch_idx])
]
return outputs
# Since TridentNet is defined over ResNet50 and ResNet101, here we
# only support TridentBottleneckBlock.
class TridentBottleneck(Bottleneck):
"""BottleBlock for TridentResNet.
Args:
trident_dilations (tuple[int, int, int]): Dilations of different
trident branch.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
concat_output (bool): Whether to concat the output list to a Tensor.
`True` only in the last Block.
"""
def __init__(self, trident_dilations, test_branch_idx, concat_output,
**kwargs):
super(TridentBottleneck, self).__init__(**kwargs)
self.trident_dilations = trident_dilations
self.num_branch = len(trident_dilations)
self.concat_output = concat_output
self.test_branch_idx = test_branch_idx
self.conv2 = TridentConv(
self.planes,
self.planes,
kernel_size=3,
stride=self.conv2_stride,
bias=False,
trident_dilations=self.trident_dilations,
test_branch_idx=test_branch_idx,
init_cfg=dict(
type='Kaiming',
distribution='uniform',
mode='fan_in',
override=dict(name='conv2')))
def forward(self, x):
def _inner_forward(x):
num_branch = (
self.num_branch
if self.training or self.test_branch_idx == -1 else 1)
identity = x
if not isinstance(x, list):
x = (x, ) * num_branch
identity = x
if self.downsample is not None:
identity = [self.downsample(b) for b in x]
out = [self.conv1(b) for b in x]
out = [self.norm1(b) for b in out]
out = [self.relu(b) for b in out]
if self.with_plugins:
for k in range(len(out)):
out[k] = self.forward_plugin(out[k],
self.after_conv1_plugin_names)
out = self.conv2(out)
out = [self.norm2(b) for b in out]
out = [self.relu(b) for b in out]
if self.with_plugins:
for k in range(len(out)):
out[k] = self.forward_plugin(out[k],
self.after_conv2_plugin_names)
out = [self.conv3(b) for b in out]
out = [self.norm3(b) for b in out]
if self.with_plugins:
for k in range(len(out)):
out[k] = self.forward_plugin(out[k],
self.after_conv3_plugin_names)
out = [
out_b + identity_b for out_b, identity_b in zip(out, identity)
]
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = [self.relu(b) for b in out]
if self.concat_output:
out = torch.cat(out, dim=0)
return out
def make_trident_res_layer(block,
inplanes,
planes,
num_blocks,
stride=1,
trident_dilations=(1, 2, 3),
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
test_branch_idx=-1):
"""Build Trident Res Layers."""
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
for i in range(num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride if i == 0 else 1,
trident_dilations=trident_dilations,
downsample=downsample if i == 0 else None,
style=style,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
plugins=plugins,
test_branch_idx=test_branch_idx,
concat_output=True if i == num_blocks - 1 else False))
inplanes = planes * block.expansion
return nn.Sequential(*layers)
@MODELS.register_module()
class TridentResNet(ResNet):
"""The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
ResNet, while in stage 3, Trident BottleBlock is utilized to replace the
    normal BottleBlock to yield trident output. Different branches share the
    convolution weights but use different dilations to achieve multi-scale
    output.
                                 / stage3(b0) \
    x - stem - stage1 - stage2 - stage3(b1) - output
                                 \ stage3(b2) /
Args:
depth (int): Depth of resnet, from {50, 101, 152}.
num_branch (int): Number of branches in TridentNet.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
trident_dilations (tuple[int]): Dilations of different trident branch.
len(trident_dilations) should be equal to num_branch.
""" # noqa
def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
**kwargs):
assert num_branch == len(trident_dilations)
assert depth in (50, 101, 152)
super(TridentResNet, self).__init__(depth, **kwargs)
assert self.num_stages == 3
self.test_branch_idx = test_branch_idx
self.num_branch = num_branch
last_stage_idx = self.num_stages - 1
stride = self.strides[last_stage_idx]
dilation = trident_dilations
dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None
if self.plugins is not None:
stage_plugins = self.make_stage_plugins(self.plugins,
last_stage_idx)
else:
stage_plugins = None
planes = self.base_channels * 2**last_stage_idx
res_layer = make_trident_res_layer(
TridentBottleneck,
inplanes=(self.block.expansion * self.base_channels *
2**(last_stage_idx - 1)),
planes=planes,
num_blocks=self.stage_blocks[last_stage_idx],
stride=stride,
trident_dilations=dilation,
style=self.style,
with_cp=self.with_cp,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
dcn=dcn,
plugins=stage_plugins,
test_branch_idx=self.test_branch_idx)
layer_name = f'layer{last_stage_idx + 1}'
self.__setattr__(layer_name, res_layer)
self.res_layers.pop(last_stage_idx)
self.res_layers.insert(last_stage_idx, layer_name)
self._freeze_stages()
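# --- Illustrative sketch, not part of the original file ---------------------
# In training mode TridentConv applies the shared weight to every branch with
# its own dilation; because padding equals dilation, the three assumed toy
# inputs below keep their spatial size.
def _trident_conv_demo():
    import torch
    conv = TridentConv(4, 8, kernel_size=3, trident_dilations=(1, 2, 3))
    conv.train()
    inputs = [torch.rand(1, 4, 16, 16) for _ in range(3)]
    outputs = conv(inputs)
    return [tuple(out.shape) for out in outputs]  # 3 x (1, 8, 16, 16)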
| 11,120 | 36.19398 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/detectors_resnext.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmdet.registry import MODELS
from .detectors_resnet import Bottleneck as _Bottleneck
from .detectors_resnet import DetectoRS_ResNet
class Bottleneck(_Bottleneck):
expansion = 4
def __init__(self,
inplanes,
planes,
groups=1,
base_width=4,
base_channels=64,
**kwargs):
"""Bottleneck block for ResNeXt.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
if groups == 1:
width = self.planes
else:
width = math.floor(self.planes *
(base_width / base_channels)) * groups
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, width, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
self.norm_cfg, width, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
self.norm_cfg, self.planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
self.conv_cfg,
self.inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
fallback_on_stride = False
self.with_modulated_dcn = False
if self.with_dcn:
fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
if self.with_sac:
self.conv2 = build_conv_layer(
self.sac,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
elif not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
self.conv_cfg,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
self.dcn,
width,
width,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
groups=groups,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
self.conv_cfg,
width,
self.planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
@MODELS.register_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
"""ResNeXt backbone for DetectoRS.
Args:
groups (int): The number of groups in ResNeXt.
base_width (int): The base width of ResNeXt.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self, groups=1, base_width=4, **kwargs):
self.groups = groups
self.base_width = base_width
super(DetectoRS_ResNeXt, self).__init__(**kwargs)
def make_res_layer(self, **kwargs):
return super().make_res_layer(
groups=self.groups,
base_width=self.base_width,
base_channels=self.base_channels,
**kwargs)
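# Illustrative sketch, not part of the original file: for the common
# ResNeXt "32x4d" setting (groups=32, base_width=4, base_channels=64) the
# grouped 3x3 conv width computed in Bottleneck.__init__ above is
# floor(planes * base_width / base_channels) * groups, e.g. 512 for the
# assumed planes=256 below.
def _resnext_width_demo(planes=256, groups=32, base_width=4, base_channels=64):
    import math
    return math.floor(planes * (base_width / base_channels)) * groups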
| 3,919 | 30.612903 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/efficientnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import math
from functools import partial
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn.bricks import ConvModule, DropPath
from mmengine.model import BaseModule, Sequential
from mmdet.registry import MODELS
from ..layers import InvertedResidual, SELayer
from ..utils import make_divisible
class EdgeResidual(BaseModule):
"""Edge Residual Block.
Args:
in_channels (int): The input channels of this module.
out_channels (int): The output channels of this module.
mid_channels (int): The input channels of the second convolution.
kernel_size (int): The kernel size of the first convolution.
Defaults to 3.
stride (int): The stride of the first convolution. Defaults to 1.
se_cfg (dict, optional): Config dict for se layer. Defaults to None,
which means no se layer.
with_residual (bool): Use residual connection. Defaults to True.
conv_cfg (dict, optional): Config dict for convolution layer.
Defaults to None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Defaults to ``dict(type='BN')``.
act_cfg (dict): Config dict for activation layer.
Defaults to ``dict(type='ReLU')``.
drop_path_rate (float): stochastic depth rate. Defaults to 0.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Defaults to False.
init_cfg (dict | list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_residual=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
drop_path_rate=0.,
with_cp=False,
init_cfg=None,
**kwargs):
super(EdgeResidual, self).__init__(init_cfg=init_cfg)
assert stride in [1, 2]
self.with_cp = with_cp
self.drop_path = DropPath(
drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.with_se = se_cfg is not None
self.with_residual = (
stride == 1 and in_channels == out_channels and with_residual)
if self.with_se:
assert isinstance(se_cfg, dict)
self.conv1 = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=1,
padding=kernel_size // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.conv2 = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
out = self.conv1(out)
if self.with_se:
out = self.se(out)
out = self.conv2(out)
if self.with_residual:
return x + self.drop_path(out)
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
def model_scaling(layer_setting, arch_setting):
"""Scaling operation to the layer's parameters according to the
arch_setting."""
# scale width
new_layer_setting = copy.deepcopy(layer_setting)
for layer_cfg in new_layer_setting:
for block_cfg in layer_cfg:
block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8)
# scale depth
split_layer_setting = [new_layer_setting[0]]
for layer_cfg in new_layer_setting[1:-1]:
tmp_index = [0]
for i in range(len(layer_cfg) - 1):
if layer_cfg[i + 1][1] != layer_cfg[i][1]:
tmp_index.append(i + 1)
tmp_index.append(len(layer_cfg))
for i in range(len(tmp_index) - 1):
split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i +
1]])
split_layer_setting.append(new_layer_setting[-1])
num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]]
new_layers = [
int(math.ceil(arch_setting[1] * num)) for num in num_of_layers
]
merge_layer_setting = [split_layer_setting[0]]
for i, layer_cfg in enumerate(split_layer_setting[1:-1]):
if new_layers[i] <= num_of_layers[i]:
tmp_layer_cfg = layer_cfg[:new_layers[i]]
else:
tmp_layer_cfg = copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * (
new_layers[i] - num_of_layers[i])
if tmp_layer_cfg[0][3] == 1 and i != 0:
merge_layer_setting[-1] += tmp_layer_cfg.copy()
else:
merge_layer_setting.append(tmp_layer_cfg.copy())
merge_layer_setting.append(split_layer_setting[-1])
return merge_layer_setting
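# Illustrative sketch, not part of the original file: model_scaling widens
# every block to a multiple of 8 by arch_setting[0] and repeats the blocks of
# each split by ceil(arch_setting[1] * num_blocks). The assumed 'b1' factors
# (1.0, 1.1) therefore keep the channel widths of the 'b' setting but add
# extra blocks to the deeper stages.
def _model_scaling_demo():
    layer_setting = EfficientNet.layer_settings['b']
    arch_setting = EfficientNet.arch_settings['b1']  # (1.0, 1.1, 240)
    scaled = model_scaling(layer_setting, arch_setting)
    return [len(layer_cfg) for layer_cfg in scaled]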
@MODELS.register_module()
class EfficientNet(BaseModule):
"""EfficientNet backbone.
Args:
arch (str): Architecture of efficientnet. Defaults to b0.
out_indices (Sequence[int]): Output from which stages.
Defaults to (6, ).
frozen_stages (int): Stages to be frozen (all param fixed).
Defaults to 0, which means not freezing any parameters.
conv_cfg (dict): Config dict for convolution layer.
Defaults to None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Defaults to dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Defaults to dict(type='Swish').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Defaults to False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Defaults to False.
"""
# Parameters to build layers.
# 'b' represents the architecture of normal EfficientNet family includes
# 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8'.
# 'e' represents the architecture of EfficientNet-EdgeTPU including 'es',
# 'em', 'el'.
# 6 parameters are needed to construct a layer, From left to right:
# - kernel_size: The kernel size of the block
# - out_channel: The number of out_channels of the block
    # - se_ratio: The squeeze ratio of SELayer.
# - stride: The stride of the block
# - expand_ratio: The expand_ratio of the mid_channels
# - block_type: -1: Not a block, 0: InvertedResidual, 1: EdgeResidual
layer_settings = {
'b': [[[3, 32, 0, 2, 0, -1]],
[[3, 16, 4, 1, 1, 0]],
[[3, 24, 4, 2, 6, 0],
[3, 24, 4, 1, 6, 0]],
[[5, 40, 4, 2, 6, 0],
[5, 40, 4, 1, 6, 0]],
[[3, 80, 4, 2, 6, 0],
[3, 80, 4, 1, 6, 0],
[3, 80, 4, 1, 6, 0],
[5, 112, 4, 1, 6, 0],
[5, 112, 4, 1, 6, 0],
[5, 112, 4, 1, 6, 0]],
[[5, 192, 4, 2, 6, 0],
[5, 192, 4, 1, 6, 0],
[5, 192, 4, 1, 6, 0],
[5, 192, 4, 1, 6, 0],
[3, 320, 4, 1, 6, 0]],
[[1, 1280, 0, 1, 0, -1]]
],
'e': [[[3, 32, 0, 2, 0, -1]],
[[3, 24, 0, 1, 3, 1]],
[[3, 32, 0, 2, 8, 1],
[3, 32, 0, 1, 8, 1]],
[[3, 48, 0, 2, 8, 1],
[3, 48, 0, 1, 8, 1],
[3, 48, 0, 1, 8, 1],
[3, 48, 0, 1, 8, 1]],
[[5, 96, 0, 2, 8, 0],
[5, 96, 0, 1, 8, 0],
[5, 96, 0, 1, 8, 0],
[5, 96, 0, 1, 8, 0],
[5, 96, 0, 1, 8, 0],
[5, 144, 0, 1, 8, 0],
[5, 144, 0, 1, 8, 0],
[5, 144, 0, 1, 8, 0],
[5, 144, 0, 1, 8, 0]],
[[5, 192, 0, 2, 8, 0],
[5, 192, 0, 1, 8, 0]],
[[1, 1280, 0, 1, 0, -1]]
]
} # yapf: disable
# Parameters to build different kinds of architecture.
# From left to right: scaling factor for width, scaling factor for depth,
# resolution.
arch_settings = {
'b0': (1.0, 1.0, 224),
'b1': (1.0, 1.1, 240),
'b2': (1.1, 1.2, 260),
'b3': (1.2, 1.4, 300),
'b4': (1.4, 1.8, 380),
'b5': (1.6, 2.2, 456),
'b6': (1.8, 2.6, 528),
'b7': (2.0, 3.1, 600),
'b8': (2.2, 3.6, 672),
'es': (1.0, 1.0, 224),
'em': (1.0, 1.1, 240),
'el': (1.2, 1.4, 300)
}
def __init__(self,
arch='b0',
drop_path_rate=0.,
out_indices=(6, ),
frozen_stages=0,
conv_cfg=dict(type='Conv2dAdaptivePadding'),
norm_cfg=dict(type='BN', eps=1e-3),
act_cfg=dict(type='Swish'),
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
layer=['_BatchNorm', 'GroupNorm'],
val=1)
]):
super(EfficientNet, self).__init__(init_cfg)
assert arch in self.arch_settings, \
f'"{arch}" is not one of the arch_settings ' \
f'({", ".join(self.arch_settings.keys())})'
self.arch_setting = self.arch_settings[arch]
self.layer_setting = self.layer_settings[arch[:1]]
for index in out_indices:
if index not in range(0, len(self.layer_setting)):
                raise ValueError('the item in out_indices must be in '
f'range(0, {len(self.layer_setting)}). '
f'But received {index}')
if frozen_stages not in range(len(self.layer_setting) + 1):
raise ValueError('frozen_stages must be in range(0, '
f'{len(self.layer_setting) + 1}). '
f'But received {frozen_stages}')
self.drop_path_rate = drop_path_rate
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.layer_setting = model_scaling(self.layer_setting,
self.arch_setting)
block_cfg_0 = self.layer_setting[0][0]
block_cfg_last = self.layer_setting[-1][0]
self.in_channels = make_divisible(block_cfg_0[1], 8)
self.out_channels = block_cfg_last[1]
self.layers = nn.ModuleList()
self.layers.append(
ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=block_cfg_0[0],
stride=block_cfg_0[3],
padding=block_cfg_0[0] // 2,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
self.make_layer()
# Avoid building unused layers in mmdetection.
if len(self.layers) < max(self.out_indices) + 1:
self.layers.append(
ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channels,
kernel_size=block_cfg_last[0],
stride=block_cfg_last[3],
padding=block_cfg_last[0] // 2,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
def make_layer(self):
# Without the first and the final conv block.
layer_setting = self.layer_setting[1:-1]
total_num_blocks = sum([len(x) for x in layer_setting])
block_idx = 0
dpr = [
x.item()
for x in torch.linspace(0, self.drop_path_rate, total_num_blocks)
] # stochastic depth decay rule
for i, layer_cfg in enumerate(layer_setting):
# Avoid building unused layers in mmdetection.
if i > max(self.out_indices) - 1:
break
layer = []
for i, block_cfg in enumerate(layer_cfg):
(kernel_size, out_channels, se_ratio, stride, expand_ratio,
block_type) = block_cfg
mid_channels = int(self.in_channels * expand_ratio)
out_channels = make_divisible(out_channels, 8)
if se_ratio <= 0:
se_cfg = None
else:
# In mmdetection, the `divisor` is deleted to align
# the logic of SELayer with mmcls.
se_cfg = dict(
channels=mid_channels,
ratio=expand_ratio * se_ratio,
act_cfg=(self.act_cfg, dict(type='Sigmoid')))
if block_type == 1: # edge tpu
if i > 0 and expand_ratio == 3:
with_residual = False
expand_ratio = 4
else:
with_residual = True
mid_channels = int(self.in_channels * expand_ratio)
if se_cfg is not None:
# In mmdetection, the `divisor` is deleted to align
# the logic of SELayer with mmcls.
se_cfg = dict(
channels=mid_channels,
ratio=se_ratio * expand_ratio,
act_cfg=(self.act_cfg, dict(type='Sigmoid')))
block = partial(EdgeResidual, with_residual=with_residual)
else:
block = InvertedResidual
layer.append(
block(
in_channels=self.in_channels,
out_channels=out_channels,
mid_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
se_cfg=se_cfg,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
drop_path_rate=dpr[block_idx],
with_cp=self.with_cp,
# In mmdetection, `with_expand_conv` is set to align
# the logic of InvertedResidual with mmcls.
with_expand_conv=(mid_channels != self.in_channels)))
self.in_channels = out_channels
block_idx += 1
self.layers.append(Sequential(*layer))
def forward(self, x):
outs = []
for i, layer in enumerate(self.layers):
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
for i in range(self.frozen_stages):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def train(self, mode=True):
super(EfficientNet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
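# --- Illustrative usage sketch, not part of the original file ---------------
# With the default out_indices=(6, ) only the final 1x1 conv stage is
# returned; for arch 'b0' and an assumed 64x64 input this is a single
# 1280-channel, stride-32 feature map.
if __name__ == '__main__':
    import torch
    model = EfficientNet(arch='b0', out_indices=(6, ))
    model.eval()
    with torch.no_grad():
        feats = model(torch.rand(1, 3, 64, 64))
    print([tuple(f.shape) for f in feats])  # expected: [(1, 1280, 2, 2)]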
| 16,240 | 37.761337 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer
from mmengine.model import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.registry import MODELS
from ..layers import ResLayer
class BasicBlock(BaseModule):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
init_cfg=None):
super(BasicBlock, self).__init__(init_cfg)
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg, planes, planes, 3, padding=1, bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
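# Illustrative sketch, not part of the original file: BasicBlock has
# expansion = 1, so with the assumed stride of 1 and no downsample branch it
# preserves both the channel count and the spatial size.
def _basic_block_demo():
    import torch
    block = BasicBlock(inplanes=64, planes=64)
    x = torch.rand(1, 64, 56, 56)
    return tuple(block(x).shape)  # (1, 64, 56, 56)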
class Bottleneck(BaseModule):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
init_cfg=None):
"""Bottleneck block for ResNet.
If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__(init_cfg)
assert style in ['pytorch', 'caffe']
assert dcn is None or isinstance(dcn, dict)
assert plugins is None or isinstance(plugins, list)
if plugins is not None:
allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
assert all(p['position'] in allowed_position for p in plugins)
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.dcn = dcn
self.with_dcn = dcn is not None
self.plugins = plugins
self.with_plugins = plugins is not None
if self.with_plugins:
# collect plugins for conv1/conv2/conv3
self.after_conv1_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv1'
]
self.after_conv2_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv2'
]
self.after_conv3_plugins = [
plugin['cfg'] for plugin in plugins
if plugin['position'] == 'after_conv3'
]
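        # 'pytorch' style puts the stride-2 conv on the 3x3 layer (conv2),
        # while 'caffe' style puts it on the first 1x1 layer (conv1).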
if self.style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, planes * self.expansion, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
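        # When DCN is enabled, ``fallback_on_stride`` makes conv2 use a
        # regular convolution instead of a deformable one.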
fallback_on_stride = False
if self.with_dcn:
fallback_on_stride = dcn.pop('fallback_on_stride', False)
if not self.with_dcn or fallback_on_stride:
self.conv2 = build_conv_layer(
conv_cfg,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
else:
assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
self.conv2 = build_conv_layer(
dcn,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
conv_cfg,
planes,
planes * self.expansion,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
if self.with_plugins:
self.after_conv1_plugin_names = self.make_block_plugins(
planes, self.after_conv1_plugins)
self.after_conv2_plugin_names = self.make_block_plugins(
planes, self.after_conv2_plugins)
self.after_conv3_plugin_names = self.make_block_plugins(
planes * self.expansion, self.after_conv3_plugins)
def make_block_plugins(self, in_channels, plugins):
"""make plugins for block.
Args:
in_channels (int): Input channels of plugin.
plugins (list[dict]): List of plugins cfg to build.
Returns:
list[str]: List of the names of plugin.
"""
assert isinstance(plugins, list)
plugin_names = []
for plugin in plugins:
plugin = plugin.copy()
name, layer = build_plugin_layer(
plugin,
in_channels=in_channels,
postfix=plugin.pop('postfix', ''))
assert not hasattr(self, name), f'duplicate plugin {name}'
self.add_module(name, layer)
plugin_names.append(name)
return plugin_names
def forward_plugin(self, x, plugin_names):
out = x
for name in plugin_names:
out = getattr(self, name)(out)
return out
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name)
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name)
@property
def norm3(self):
"""nn.Module: normalization layer after the third convolution layer"""
return getattr(self, self.norm3_name)
def forward(self, x):
"""Forward function."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@MODELS.register_module()
class ResNet(BaseModule):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
stem_channels (int | None): Number of stem channels. If not specified,
it will be the same as `base_channels`. Default: None.
base_channels (int): Number of base channels of res layer. Default: 64.
in_channels (int): Number of input image channels. Default: 3.
num_stages (int): Resnet stages. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
        deep_stem (bool): Replace the 7x7 conv in the input stem with
            three 3x3 convs.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
norm_cfg (dict): Dictionary to construct and config norm layer.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only.
plugins (list[dict]): List of plugins for stages, each dict contains:
- cfg (dict, required): Cfg dict to build plugin.
- position (str, required): Position inside block to insert
plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
- stages (tuple[bool], optional): Stages to apply plugin, length
should be same as 'num_stages'.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> from mmdet.models import ResNet
>>> import torch
>>> self = ResNet(depth=18)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 64, 8, 8)
(1, 128, 4, 4)
(1, 256, 2, 2)
(1, 512, 1, 1)
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
in_channels=3,
stem_channels=None,
base_channels=64,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
deep_stem=False,
avg_down=False,
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
dcn=None,
stage_with_dcn=(False, False, False, False),
plugins=None,
with_cp=False,
zero_init_residual=True,
pretrained=None,
init_cfg=None):
super(ResNet, self).__init__(init_cfg)
self.zero_init_residual = zero_init_residual
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
block_init_cfg = None
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
block = self.arch_settings[depth][0]
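                # Zero-init the last norm layer of each residual block so
                # that the block initially behaves like an identity mapping.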
if self.zero_init_residual:
if block is BasicBlock:
block_init_cfg = dict(
type='Constant',
val=0,
override=dict(name='norm2'))
elif block is Bottleneck:
block_init_cfg = dict(
type='Constant',
val=0,
override=dict(name='norm3'))
else:
raise TypeError('pretrained must be a str or None')
self.depth = depth
if stem_channels is None:
stem_channels = base_channels
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.dcn = dcn
self.stage_with_dcn = stage_with_dcn
if dcn is not None:
assert len(stage_with_dcn) == num_stages
self.plugins = plugins
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = stem_channels
self._make_stem_layer(in_channels, stem_channels)
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
dcn = self.dcn if self.stage_with_dcn[i] else None
if plugins is not None:
stage_plugins = self.make_stage_plugins(plugins, i)
else:
stage_plugins = None
planes = base_channels * 2**i
res_layer = self.make_res_layer(
block=self.block,
inplanes=self.inplanes,
planes=planes,
num_blocks=num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
plugins=stage_plugins,
init_cfg=block_init_cfg)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = self.block.expansion * base_channels * 2**(
len(self.stage_blocks) - 1)
def make_stage_plugins(self, plugins, stage_idx):
"""Make plugins for ResNet ``stage_idx`` th stage.
        Currently we support inserting ``context_block``,
        ``empirical_attention_block`` and ``nonlocal_block`` into backbones
        like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3
        of a Bottleneck.
An example of plugins format could be:
Examples:
>>> plugins=[
... dict(cfg=dict(type='xxx', arg1='xxx'),
... stages=(False, True, True, True),
... position='after_conv2'),
... dict(cfg=dict(type='yyy'),
... stages=(True, True, True, True),
... position='after_conv3'),
... dict(cfg=dict(type='zzz', postfix='1'),
... stages=(True, True, True, True),
... position='after_conv3'),
... dict(cfg=dict(type='zzz', postfix='2'),
... stages=(True, True, True, True),
... position='after_conv3')
... ]
>>> self = ResNet(depth=18)
>>> stage_plugins = self.make_stage_plugins(plugins, 0)
>>> assert len(stage_plugins) == 3
Suppose ``stage_idx=0``, the structure of blocks in the stage would be:
.. code-block:: none
conv1-> conv2->conv3->yyy->zzz1->zzz2
        Suppose ``stage_idx=1``, the structure of blocks in the stage would be:
.. code-block:: none
conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2
If stages is missing, the plugin would be applied to all stages.
Args:
plugins (list[dict]): List of plugins cfg to build. The postfix is
required if multiple same type plugins are inserted.
stage_idx (int): Index of stage to build
Returns:
list[dict]: Plugins for current stage
"""
stage_plugins = []
for plugin in plugins:
plugin = plugin.copy()
stages = plugin.pop('stages', None)
assert stages is None or len(stages) == self.num_stages
# whether to insert plugin into current stage
if stages is None or stages[stage_idx]:
stage_plugins.append(plugin)
return stage_plugins
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer``."""
return ResLayer(**kwargs)
@property
def norm1(self):
"""nn.Module: the normalization layer named "norm1" """
return getattr(self, self.norm1_name)
def _make_stem_layer(self, in_channels, stem_channels):
if self.deep_stem:
self.stem = nn.Sequential(
build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels // 2,
kernel_size=3,
stride=2,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
nn.ReLU(inplace=True),
build_conv_layer(
self.conv_cfg,
stem_channels // 2,
stem_channels // 2,
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
nn.ReLU(inplace=True),
build_conv_layer(
self.conv_cfg,
stem_channels // 2,
stem_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False),
build_norm_layer(self.norm_cfg, stem_channels)[1],
nn.ReLU(inplace=True))
else:
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, stem_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
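        # In both stem variants the input is downsampled by a total factor of
        # 4: a stride-2 conv (or conv stack) followed by stride-2 max pooling.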
def _freeze_stages(self):
if self.frozen_stages >= 0:
if self.deep_stem:
self.stem.eval()
for param in self.stem.parameters():
param.requires_grad = False
else:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x):
"""Forward function."""
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self, mode=True):
"""Convert the model into training mode while keep normalization layer
freezed."""
super(ResNet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
                # trick: eval() only has an effect on BatchNorm layers
if isinstance(m, _BatchNorm):
m.eval()
@MODELS.register_module()
class ResNetV1d(ResNet):
r"""ResNetV1d variant described in `Bag of Tricks
<https://arxiv.org/pdf/1812.01187.pdf>`_.
    Compared with the default ResNet (ResNetV1b), ResNetV1d replaces the 7x7
    conv in the input stem with three 3x3 convs. In the downsampling block, a
    2x2 avg_pool with stride 2 is added before the conv, whose stride is
    changed to 1.
"""
def __init__(self, **kwargs):
super(ResNetV1d, self).__init__(
deep_stem=True, avg_down=True, **kwargs)
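# A minimal usage sketch, assuming ``torch`` is available; ResNetV1d keeps
# the same call pattern as ResNet, only the stem and downsampling paths
# differ:
#
#     >>> import torch
#     >>> from mmdet.models import ResNetV1d
#     >>> self = ResNetV1d(depth=50)
#     >>> self.eval()
#     >>> outs = self.forward(torch.rand(1, 3, 64, 64))
#     >>> len(outs)  # one feature map per entry in ``out_indices``
#     4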
| 23,840 | 34.424963 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/detectors_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.logging import MMLogger
from mmengine.model import Sequential, constant_init, kaiming_init
from mmengine.runner.checkpoint import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.registry import MODELS
from .resnet import BasicBlock
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet
class Bottleneck(_Bottleneck):
r"""Bottleneck for the ResNet backbone in `DetectoRS
<https://arxiv.org/pdf/2006.02334.pdf>`_.
This bottleneck allows the users to specify whether to use
SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).
Args:
inplanes (int): The number of input channels.
planes (int): The number of output channels before expansion.
rfp_inplanes (int, optional): The number of channels from RFP.
Default: None. If specified, an additional conv layer will be
added for ``rfp_feat``. Otherwise, the structure is the same as
base class.
sac (dict, optional): Dictionary to construct SAC. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
expansion = 4
def __init__(self,
inplanes,
planes,
rfp_inplanes=None,
sac=None,
init_cfg=None,
**kwargs):
super(Bottleneck, self).__init__(
inplanes, planes, init_cfg=init_cfg, **kwargs)
assert sac is None or isinstance(sac, dict)
self.sac = sac
self.with_sac = sac is not None
if self.with_sac:
self.conv2 = build_conv_layer(
self.sac,
planes,
planes,
kernel_size=3,
stride=self.conv2_stride,
padding=self.dilation,
dilation=self.dilation,
bias=False)
self.rfp_inplanes = rfp_inplanes
if self.rfp_inplanes:
self.rfp_conv = build_conv_layer(
None,
self.rfp_inplanes,
planes * self.expansion,
1,
stride=1,
bias=True)
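            # ``rfp_conv`` is zero-initialized (via ``init_cfg`` below) so
            # that the recursive RFP feedback does not perturb the pretrained
            # backbone features at the start of training.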
if init_cfg is None:
self.init_cfg = dict(
type='Constant', val=0, override=dict(name='rfp_conv'))
def rfp_forward(self, x, rfp_feat):
"""The forward function that also takes the RFP features as input."""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv1_plugin_names)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv2_plugin_names)
out = self.conv3(out)
out = self.norm3(out)
if self.with_plugins:
out = self.forward_plugin(out, self.after_conv3_plugin_names)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
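        # Fuse the RFP feedback: project it with the 1x1 ``rfp_conv`` and add
        # it to the residual output before the final activation.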
if self.rfp_inplanes:
rfp_feat = self.rfp_conv(rfp_feat)
out = out + rfp_feat
out = self.relu(out)
return out
class ResLayer(Sequential):
"""ResLayer to build ResNet style backbone for RPF in detectoRS.
The difference between this module and base class is that we pass
``rfp_inplanes`` to the first block.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
downsample_first (bool): Downsample at the first block or last block.
False for Hourglass, True for ResNet. Default: True
rfp_inplanes (int, optional): The number of channels from RFP.
Default: None. If specified, an additional conv layer will be
added for ``rfp_feat``. Otherwise, the structure is the same as
base class.
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
downsample_first=True,
rfp_inplanes=None,
**kwargs):
self.block = block
assert downsample_first, f'downsample_first={downsample_first} is ' \
'not supported in DetectoRS'
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down and stride != 1:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
rfp_inplanes=rfp_inplanes,
**kwargs))
inplanes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super(ResLayer, self).__init__(*layers)
@MODELS.register_module()
class DetectoRS_ResNet(ResNet):
"""ResNet backbone for DetectoRS.
Args:
sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
Convolution). Default: None.
stage_with_sac (list): Which stage to use sac. Default: (False, False,
False, False).
rfp_inplanes (int, optional): The number of channels from RFP.
Default: None. If specified, an additional conv layer will be
added for ``rfp_feat``. Otherwise, the structure is the same as
base class.
        output_img (bool): If ``True``, the input image will be prepended to
            the output feature tuple. Default: False.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
sac=None,
stage_with_sac=(False, False, False, False),
rfp_inplanes=None,
output_img=False,
pretrained=None,
init_cfg=None,
**kwargs):
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
self.pretrained = pretrained
if init_cfg is not None:
assert isinstance(init_cfg, dict), \
f'init_cfg must be a dict, but got {type(init_cfg)}'
if 'type' in init_cfg:
assert init_cfg.get('type') == 'Pretrained', \
'Only can initialize module by loading a pretrained model'
else:
raise KeyError('`init_cfg` must contain the key "type"')
self.pretrained = init_cfg.get('checkpoint')
self.sac = sac
self.stage_with_sac = stage_with_sac
self.rfp_inplanes = rfp_inplanes
self.output_img = output_img
super(DetectoRS_ResNet, self).__init__(**kwargs)
self.inplanes = self.stem_channels
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = self.strides[i]
dilation = self.dilations[i]
dcn = self.dcn if self.stage_with_dcn[i] else None
sac = self.sac if self.stage_with_sac[i] else None
if self.plugins is not None:
stage_plugins = self.make_stage_plugins(self.plugins, i)
else:
stage_plugins = None
planes = self.base_channels * 2**i
res_layer = self.make_res_layer(
block=self.block,
inplanes=self.inplanes,
planes=planes,
num_blocks=num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=self.with_cp,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
dcn=dcn,
sac=sac,
rfp_inplanes=rfp_inplanes if i > 0 else None,
plugins=stage_plugins)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
    # Overridden so that the backbone can be initialized properly when it is
    # wrapped by RFP.
    def init_weights(self):
        # Calling the parent's method here would raise a parameter
        # initialization exception, so the call is intentionally skipped:
        # super(DetectoRS_ResNet, self).init_weights()
if isinstance(self.pretrained, str):
logger = MMLogger.get_current_instance()
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.dcn is not None:
for m in self.modules():
if isinstance(m, Bottleneck) and hasattr(
m.conv2, 'conv_offset'):
constant_init(m.conv2.conv_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
def make_res_layer(self, **kwargs):
"""Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
return ResLayer(**kwargs)
def forward(self, x):
"""Forward function."""
outs = list(super(DetectoRS_ResNet, self).forward(x))
if self.output_img:
outs.insert(0, x)
return tuple(outs)
def rfp_forward(self, x, rfp_feats):
"""Forward function for RFP."""
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
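            # The first stage receives no RFP feedback; ``rfp_inplanes`` is
            # only passed to stages with index > 0 in ``__init__``.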
rfp_feat = rfp_feats[i] if i > 0 else None
for layer in res_layer:
x = layer.rfp_forward(x, rfp_feat)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
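# A minimal config sketch for this backbone, following the typical DetectoRS
# setup; the exact SAC/ConvAWS settings here are assumptions and may differ
# from the configs actually shipped with a given project:
#
#     backbone = dict(
#         type='DetectoRS_ResNet',
#         depth=50,
#         num_stages=4,
#         out_indices=(0, 1, 2, 3),
#         conv_cfg=dict(type='ConvAWS'),
#         sac=dict(type='SAC', use_deform=True),
#         stage_with_sac=(False, True, True, True),
#         output_img=True)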
| 12,764 | 35.059322 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/backbones/ssd_vgg.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import VGG
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from ..necks import ssd_neck
@MODELS.register_module()
class SSDVGG(VGG, BaseModule):
"""VGG Backbone network for single-shot-detection.
Args:
depth (int): Depth of vgg, from {11, 13, 16, 19}.
with_last_pool (bool): Whether to add a pooling layer at the last
of the model
ceil_mode (bool): When True, will use `ceil` instead of `floor`
to compute the output shape.
out_indices (Sequence[int]): Output from which stages.
        out_feature_indices (Sequence[int]): Output from which feature maps.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
        input_size (int, optional): Deprecated argument.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argument.
            L2 normalization layer init scale.
Example:
>>> self = SSDVGG(input_size=300, depth=11)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 300, 300)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 1024, 19, 19)
(1, 512, 10, 10)
(1, 256, 5, 5)
(1, 256, 3, 3)
(1, 256, 1, 1)
"""
extra_setting = {
300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
}
def __init__(self,
depth,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
pretrained=None,
init_cfg=None,
input_size=None,
l2_norm_scale=None):
# TODO: in_channels for mmcv.VGG
super(SSDVGG, self).__init__(
depth,
with_last_pool=with_last_pool,
ceil_mode=ceil_mode,
out_indices=out_indices)
self.features.add_module(
str(len(self.features)),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
self.features.add_module(
str(len(self.features)),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.features.add_module(
str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.out_feature_indices = out_feature_indices
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if init_cfg is not None:
self.init_cfg = init_cfg
elif isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
dict(type='Normal', std=0.01, layer='Linear'),
]
else:
raise TypeError('pretrained must be a str or None')
if input_size is not None:
warnings.warn('DeprecationWarning: input_size is deprecated')
if l2_norm_scale is not None:
warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '
'deprecated, it has been moved to SSDNeck.')
def init_weights(self, pretrained=None):
super(VGG, self).init_weights()
def forward(self, x):
"""Forward function."""
outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_feature_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
class L2Norm(ssd_neck.L2Norm):
def __init__(self, **kwargs):
super(L2Norm, self).__init__(**kwargs)
warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '
'is deprecated, please use L2Norm in '
'mmdet/models/necks/ssd_neck.py instead')
| 4,707 | 35.496124 | 79 |
py
|