repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
ERD
|
ERD-main/mmdet/evaluation/functional/bbox_overlaps.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
def bbox_overlaps(bboxes1,
bboxes2,
mode='iou',
eps=1e-6,
use_legacy_coordinate=False):
"""Calculate the ious between each bbox of bboxes1 and bboxes2.
Args:
bboxes1 (ndarray): Shape (n, 4)
bboxes2 (ndarray): Shape (k, 4)
mode (str): IOU (intersection over union) or IOF (intersection
over foreground)
use_legacy_coordinate (bool): Whether to use coordinate system in
mmdet v1.x, which means width and height should be
calculated as `x2 - x1 + 1` and `y2 - y1 + 1` respectively.
Note when function is used in `VOCDataset`, it should be
True to align with the official implementation
`http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar`
Default: False.
Returns:
ious (ndarray): Shape (n, k)
"""
assert mode in ['iou', 'iof']
if not use_legacy_coordinate:
extra_length = 0.
else:
extra_length = 1.
bboxes1 = bboxes1.astype(np.float32)
bboxes2 = bboxes2.astype(np.float32)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
ious = np.zeros((rows, cols), dtype=np.float32)
if rows * cols == 0:
return ious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
ious = np.zeros((cols, rows), dtype=np.float32)
exchange = True
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + extra_length) * (
bboxes1[:, 3] - bboxes1[:, 1] + extra_length)
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + extra_length) * (
bboxes2[:, 3] - bboxes2[:, 1] + extra_length)
for i in range(bboxes1.shape[0]):
x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
overlap = np.maximum(x_end - x_start + extra_length, 0) * np.maximum(
y_end - y_start + extra_length, 0)
if mode == 'iou':
union = area1[i] + area2 - overlap
else:
union = area1[i] if not exchange else area2
union = np.maximum(union, eps)
ious[i, :] = overlap / union
if exchange:
ious = ious.T
return ious
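# A minimal sanity check (illustrative, not part of the original module):
# two 10x10 boxes overlapping in a 5x5 region give
# IoU = 25 / (100 + 100 - 25) ~= 0.143 and IoF = 25 / 100 = 0.25.
#
#   >>> b1 = np.array([[0., 0., 10., 10.]])
#   >>> b2 = np.array([[5., 5., 15., 15.]])
#   >>> bbox_overlaps(b1, b2, mode='iou')   # ~0.143
#   >>> bbox_overlaps(b1, b2, mode='iof')   # 0.25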
| 2,454 | 36.19697 | 86 |
py
|
ERD
|
ERD-main/mmdet/evaluation/functional/mean_ap.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from multiprocessing import Pool
import numpy as np
from mmengine.logging import print_log
from mmengine.utils import is_str
from terminaltables import AsciiTable
from .bbox_overlaps import bbox_overlaps
from .class_names import get_classes
def average_precision(recalls, precisions, mode='area'):
"""Calculate average precision (for single or multiple scales).
Args:
recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
mode (str): 'area' or '11points', 'area' means calculating the area
under precision-recall curve, '11points' means calculating
the average precision of recalls at [0, 0.1, ..., 1]
Returns:
float or ndarray: calculated average precision
"""
no_scale = False
if recalls.ndim == 1:
no_scale = True
recalls = recalls[np.newaxis, :]
precisions = precisions[np.newaxis, :]
assert recalls.shape == precisions.shape and recalls.ndim == 2
num_scales = recalls.shape[0]
ap = np.zeros(num_scales, dtype=np.float32)
if mode == 'area':
zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
ones = np.ones((num_scales, 1), dtype=recalls.dtype)
mrec = np.hstack((zeros, recalls, ones))
mpre = np.hstack((zeros, precisions, zeros))
for i in range(mpre.shape[1] - 1, 0, -1):
mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
for i in range(num_scales):
ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
ap[i] = np.sum(
(mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
elif mode == '11points':
for i in range(num_scales):
for thr in np.arange(0, 1 + 1e-3, 0.1):
precs = precisions[i, recalls[i, :] >= thr]
prec = precs.max() if precs.size > 0 else 0
ap[i] += prec
ap /= 11
else:
raise ValueError(
'Unrecognized mode, only "area" and "11points" are supported')
if no_scale:
ap = ap[0]
return ap
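# A worked example (illustrative only): a detector that is perfectly precise
# up to recall 0.5 and then drops to precision 0.5 has an area-mode AP of
# 0.5 * 1.0 + 0.5 * 0.5 = 0.75.
#
#   >>> recalls = np.array([0.5, 1.0])
#   >>> precisions = np.array([1.0, 0.5])
#   >>> average_precision(recalls, precisions, mode='area')   # -> 0.75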
def tpfp_imagenet(det_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
default_iou_thr=0.5,
area_ranges=None,
use_legacy_coordinate=False,
**kwargs):
"""Check if detected bboxes are true positive or false positive.
Args:
det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
of shape (k, 4). Defaults to None
default_iou_thr (float): IoU threshold to be considered as matched for
medium and large bboxes (small ones have special rules).
Defaults to 0.5.
area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
in the format [(min1, max1), (min2, max2), ...]. Defaults to None.
use_legacy_coordinate (bool): Whether to use coordinate system in
mmdet v1.x, which means width and height should be
calculated as `x2 - x1 + 1` and `y2 - y1 + 1` respectively.
Defaults to False.
Returns:
tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
each array is (num_scales, m).
"""
if not use_legacy_coordinate:
extra_length = 0.
else:
extra_length = 1.
# an indicator of ignored gts
gt_ignore_inds = np.concatenate(
(np.zeros(gt_bboxes.shape[0],
dtype=bool), np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
# stack gt_bboxes and gt_bboxes_ignore for convenience
gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
if area_ranges is None:
area_ranges = [(None, None)]
num_scales = len(area_ranges)
# tp and fp are of shape (num_scales, num_dets), each row is tp or fp
# of a certain scale.
tp = np.zeros((num_scales, num_dets), dtype=np.float32)
fp = np.zeros((num_scales, num_dets), dtype=np.float32)
if gt_bboxes.shape[0] == 0:
if area_ranges == [(None, None)]:
fp[...] = 1
else:
det_areas = (
det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (
det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)
for i, (min_area, max_area) in enumerate(area_ranges):
fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
return tp, fp
ious = bbox_overlaps(
det_bboxes, gt_bboxes - 1, use_legacy_coordinate=use_legacy_coordinate)
gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length
gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length
iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
default_iou_thr)
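# Note (illustrative numbers, not from the original source): for a small
# 20x20 gt box the per-gt threshold above is 400 / (30 * 30) ~= 0.44, i.e.
# relaxed below `default_iou_thr`; for a 100x100 gt box it would be
# 10000 / (110 * 110) ~= 0.83, which `np.minimum` clips back to 0.5.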
# sort all detections by scores in descending order
sort_inds = np.argsort(-det_bboxes[:, -1])
for k, (min_area, max_area) in enumerate(area_ranges):
gt_covered = np.zeros(num_gts, dtype=bool)
# if no area range is specified, gt_area_ignore is all False
if min_area is None:
gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
else:
gt_areas = gt_w * gt_h
gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
for i in sort_inds:
max_iou = -1
matched_gt = -1
# find best overlapped available gt
for j in range(num_gts):
# different from PASCAL VOC: allow finding other gts if the
# best overlapped ones are already matched by other det bboxes
if gt_covered[j]:
continue
elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
max_iou = ious[i, j]
matched_gt = j
# there are 4 cases for a det bbox:
# 1. it matches a gt, tp = 1, fp = 0
# 2. it matches an ignored gt, tp = 0, fp = 0
# 3. it matches no gt and within area range, tp = 0, fp = 1
# 4. it matches no gt but is beyond area range, tp = 0, fp = 0
if matched_gt >= 0:
gt_covered[matched_gt] = 1
if not (gt_ignore_inds[matched_gt]
or gt_area_ignore[matched_gt]):
tp[k, i] = 1
elif min_area is None:
fp[k, i] = 1
else:
bbox = det_bboxes[i, :4]
area = (bbox[2] - bbox[0] + extra_length) * (
bbox[3] - bbox[1] + extra_length)
if area >= min_area and area < max_area:
fp[k, i] = 1
return tp, fp
def tpfp_default(det_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
iou_thr=0.5,
area_ranges=None,
use_legacy_coordinate=False,
**kwargs):
"""Check if detected bboxes are true positive or false positive.
Args:
det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
of shape (k, 4). Defaults to None
iou_thr (float): IoU threshold to be considered as matched.
Defaults to 0.5.
area_ranges (list[tuple] | None): Range of bbox areas to be
evaluated, in the format [(min1, max1), (min2, max2), ...].
Defaults to None.
use_legacy_coordinate (bool): Whether to use coordinate system in
mmdet v1.x, which means width and height should be
calculated as `x2 - x1 + 1` and `y2 - y1 + 1` respectively.
Defaults to False.
Returns:
tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
each array is (num_scales, m).
"""
if not use_legacy_coordinate:
extra_length = 0.
else:
extra_length = 1.
# an indicator of ignored gts
gt_ignore_inds = np.concatenate(
(np.zeros(gt_bboxes.shape[0],
dtype=bool), np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
# stack gt_bboxes and gt_bboxes_ignore for convenience
gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
if area_ranges is None:
area_ranges = [(None, None)]
num_scales = len(area_ranges)
# tp and fp are of shape (num_scales, num_dets), each row is tp or fp of
# a certain scale
tp = np.zeros((num_scales, num_dets), dtype=np.float32)
fp = np.zeros((num_scales, num_dets), dtype=np.float32)
# if there is no gt bboxes in this image, then all det bboxes
# within area range are false positives
if gt_bboxes.shape[0] == 0:
if area_ranges == [(None, None)]:
fp[...] = 1
else:
det_areas = (
det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (
det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)
for i, (min_area, max_area) in enumerate(area_ranges):
fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
return tp, fp
ious = bbox_overlaps(
det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate)
# for each det, the max iou with all gts
ious_max = ious.max(axis=1)
# for each det, which gt overlaps most with it
ious_argmax = ious.argmax(axis=1)
# sort all dets in descending order by scores
sort_inds = np.argsort(-det_bboxes[:, -1])
for k, (min_area, max_area) in enumerate(area_ranges):
gt_covered = np.zeros(num_gts, dtype=bool)
# if no area range is specified, gt_area_ignore is all False
if min_area is None:
gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
else:
gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length)
gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
for i in sort_inds:
if ious_max[i] >= iou_thr:
matched_gt = ious_argmax[i]
if not (gt_ignore_inds[matched_gt]
or gt_area_ignore[matched_gt]):
if not gt_covered[matched_gt]:
gt_covered[matched_gt] = True
tp[k, i] = 1
else:
fp[k, i] = 1
# otherwise ignore this detected bbox, tp = 0, fp = 0
elif min_area is None:
fp[k, i] = 1
else:
bbox = det_bboxes[i, :4]
area = (bbox[2] - bbox[0] + extra_length) * (
bbox[3] - bbox[1] + extra_length)
if area >= min_area and area < max_area:
fp[k, i] = 1
return tp, fp
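# A hedged usage sketch (toy numbers, illustrative only): one gt box and two
# detections, the first matching the gt exactly and the second far away.
#
#   >>> det = np.array([[0., 0., 10., 10., 0.9], [20., 20., 30., 30., 0.8]])
#   >>> gt = np.array([[0., 0., 10., 10.]])
#   >>> tp, fp = tpfp_default(det, gt, np.empty((0, 4)))
#   >>> tp   # [[1., 0.]]
#   >>> fp   # [[0., 1.]]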
def tpfp_openimages(det_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
iou_thr=0.5,
area_ranges=None,
use_legacy_coordinate=False,
gt_bboxes_group_of=None,
use_group_of=True,
ioa_thr=0.5,
**kwargs):
"""Check if detected bboxes are true positive or false positive.
Args:
det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
of shape (k, 4). Defaults to None
iou_thr (float): IoU threshold to be considered as matched.
Defaults to 0.5.
area_ranges (list[tuple] | None): Range of bbox areas to be
evaluated, in the format [(min1, max1), (min2, max2), ...].
Defaults to None.
use_legacy_coordinate (bool): Whether to use coordinate system in
mmdet v1.x, which means width and height should be
calculated as `x2 - x1 + 1` and `y2 - y1 + 1` respectively.
Defaults to False.
gt_bboxes_group_of (ndarray): GT group_of of this image, of shape
(k, 1). Defaults to None
use_group_of (bool): Whether to use group-of boxes when calculating TP
and FP, which is only used in OpenImages evaluation. Defaults to True.
ioa_thr (float | None): IoA threshold to be considered as matched,
which is only used in OpenImages evaluation. Defaults to 0.5.
Returns:
tuple[np.ndarray]: Returns a tuple (tp, fp, det_bboxes), where the
elements of tp and fp are 0 and 1 and the shape of each array is
(num_scales, m). det_bboxes keeps only the detections that are not
matched to group-of gts when processing Open Images evaluation.
The shape is (num_scales, m).
"""
if not use_legacy_coordinate:
extra_length = 0.
else:
extra_length = 1.
# an indicator of ignored gts
gt_ignore_inds = np.concatenate(
(np.zeros(gt_bboxes.shape[0],
dtype=bool), np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
# stack gt_bboxes and gt_bboxes_ignore for convenience
gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
if area_ranges is None:
area_ranges = [(None, None)]
num_scales = len(area_ranges)
# tp and fp are of shape (num_scales, num_dets), each row is tp or fp of
# a certain scale
tp = np.zeros((num_scales, num_dets), dtype=np.float32)
fp = np.zeros((num_scales, num_dets), dtype=np.float32)
# if there is no gt bboxes in this image, then all det bboxes
# within area range are false positives
if gt_bboxes.shape[0] == 0:
if area_ranges == [(None, None)]:
fp[...] = 1
else:
det_areas = (
det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (
det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)
for i, (min_area, max_area) in enumerate(area_ranges):
fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
return tp, fp, det_bboxes
if gt_bboxes_group_of is not None and use_group_of:
# when handling group-of boxes, divide gt boxes into two parts:
# non-group-of and group-of. Then calculate ious and ioas against
# non-group-of and group-of gts respectively. This is only used in
# OpenImages evaluation.
assert gt_bboxes_group_of.shape[0] == gt_bboxes.shape[0]
non_group_gt_bboxes = gt_bboxes[~gt_bboxes_group_of]
group_gt_bboxes = gt_bboxes[gt_bboxes_group_of]
num_gts_group = group_gt_bboxes.shape[0]
ious = bbox_overlaps(det_bboxes, non_group_gt_bboxes)
ioas = bbox_overlaps(det_bboxes, group_gt_bboxes, mode='iof')
else:
# if not consider group-of boxes, only calculate ious through gt boxes
ious = bbox_overlaps(
det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate)
ioas = None
if ious.shape[1] > 0:
# for each det, the max iou with all gts
ious_max = ious.max(axis=1)
# for each det, which gt overlaps most with it
ious_argmax = ious.argmax(axis=1)
# sort all dets in descending order by scores
sort_inds = np.argsort(-det_bboxes[:, -1])
for k, (min_area, max_area) in enumerate(area_ranges):
gt_covered = np.zeros(num_gts, dtype=bool)
# if no area range is specified, gt_area_ignore is all False
if min_area is None:
gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
else:
gt_areas = (
gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length)
gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
for i in sort_inds:
if ious_max[i] >= iou_thr:
matched_gt = ious_argmax[i]
if not (gt_ignore_inds[matched_gt]
or gt_area_ignore[matched_gt]):
if not gt_covered[matched_gt]:
gt_covered[matched_gt] = True
tp[k, i] = 1
else:
fp[k, i] = 1
# otherwise ignore this detected bbox, tp = 0, fp = 0
elif min_area is None:
fp[k, i] = 1
else:
bbox = det_bboxes[i, :4]
area = (bbox[2] - bbox[0] + extra_length) * (
bbox[3] - bbox[1] + extra_length)
if area >= min_area and area < max_area:
fp[k, i] = 1
else:
# if there are no non-group-of gt bboxes in this image,
# then all det bboxes within area range are false positives.
# Only used in OpenImages evaluation.
if area_ranges == [(None, None)]:
fp[...] = 1
else:
det_areas = (
det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (
det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)
for i, (min_area, max_area) in enumerate(area_ranges):
fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
if ioas is None or ioas.shape[1] <= 0:
return tp, fp, det_bboxes
else:
# The evaluation of group-of TP and FP are done in two stages:
# 1. All detections are first matched to non group-of boxes; true
# positives are determined.
# 2. Detections that are determined as false positives are matched
# against group-of boxes and calculated group-of TP and FP.
# Only used in OpenImages evaluation.
det_bboxes_group = np.zeros(
(num_scales, ioas.shape[1], det_bboxes.shape[1]), dtype=float)
match_group_of = np.zeros((num_scales, num_dets), dtype=bool)
tp_group = np.zeros((num_scales, num_gts_group), dtype=np.float32)
ioas_max = ioas.max(axis=1)
# for each det, which gt overlaps most with it
ioas_argmax = ioas.argmax(axis=1)
# sort all dets in descending order by scores
sort_inds = np.argsort(-det_bboxes[:, -1])
for k, (min_area, max_area) in enumerate(area_ranges):
box_is_covered = tp[k]
# if no area range is specified, gt_area_ignore is all False
if min_area is None:
gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
else:
gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1])
gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
for i in sort_inds:
matched_gt = ioas_argmax[i]
if not box_is_covered[i]:
if ioas_max[i] >= ioa_thr:
if not (gt_ignore_inds[matched_gt]
or gt_area_ignore[matched_gt]):
if not tp_group[k, matched_gt]:
tp_group[k, matched_gt] = 1
match_group_of[k, i] = True
else:
match_group_of[k, i] = True
if det_bboxes_group[k, matched_gt, -1] < \
det_bboxes[i, -1]:
det_bboxes_group[k, matched_gt] = \
det_bboxes[i]
fp_group = (tp_group <= 0).astype(float)
tps = []
fps = []
# concatenate tp, fp, and det_bboxes that are not matched to group-of
# gt boxes with tp_group, fp_group, and det_bboxes_group that are
# matched to group-of boxes, respectively.
for i in range(num_scales):
tps.append(
np.concatenate((tp[i][~match_group_of[i]], tp_group[i])))
fps.append(
np.concatenate((fp[i][~match_group_of[i]], fp_group[i])))
det_bboxes = np.concatenate(
(det_bboxes[~match_group_of[i]], det_bboxes_group[i]))
tp = np.vstack(tps)
fp = np.vstack(fps)
return tp, fp, det_bboxes
def get_cls_results(det_results, annotations, class_id):
"""Get det results and gt information of a certain class.
Args:
det_results (list[list]): Same as `eval_map()`.
annotations (list[dict]): Same as `eval_map()`.
class_id (int): ID of a specific class.
Returns:
tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
"""
cls_dets = [img_res[class_id] for img_res in det_results]
cls_gts = []
cls_gts_ignore = []
for ann in annotations:
gt_inds = ann['labels'] == class_id
cls_gts.append(ann['bboxes'][gt_inds, :])
if ann.get('labels_ignore', None) is not None:
ignore_inds = ann['labels_ignore'] == class_id
cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
else:
cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))
return cls_dets, cls_gts, cls_gts_ignore
def get_cls_group_ofs(annotations, class_id):
"""Get `gt_group_of` of a certain class, which is used in Open Images.
Args:
annotations (list[dict]): Same as `eval_map()`.
class_id (int): ID of a specific class.
Returns:
list[np.ndarray]: `gt_group_of` of a certain class.
"""
gt_group_ofs = []
for ann in annotations:
gt_inds = ann['labels'] == class_id
if ann.get('gt_is_group_ofs', None) is not None:
gt_group_ofs.append(ann['gt_is_group_ofs'][gt_inds])
else:
gt_group_ofs.append(np.empty((0, 1), dtype=bool))
return gt_group_ofs
def eval_map(det_results,
annotations,
scale_ranges=None,
iou_thr=0.5,
ioa_thr=None,
dataset=None,
logger=None,
tpfp_fn=None,
nproc=4,
use_legacy_coordinate=False,
use_group_of=False,
eval_mode='area'):
"""Evaluate mAP of a dataset.
Args:
det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
The outer list indicates images, and the inner list indicates
per-class detected bboxes.
annotations (list[dict]): Ground truth annotations where each item of
the list indicates an image. Keys of annotations are:
- `bboxes`: numpy array of shape (n, 4)
- `labels`: numpy array of shape (n, )
- `bboxes_ignore` (optional): numpy array of shape (k, 4)
- `labels_ignore` (optional): numpy array of shape (k, )
scale_ranges (list[tuple] | None): Range of scales to be evaluated,
in the format [(min1, max1), (min2, max2), ...]. A range of
(32, 64) means the area range between (32**2, 64**2).
Defaults to None.
iou_thr (float): IoU threshold to be considered as matched.
Defaults to 0.5.
ioa_thr (float | None): IoA threshold to be considered as matched,
which is only used in OpenImages evaluation. Defaults to None.
dataset (list[str] | str | None): Dataset name or dataset classes,
there are minor differences in metrics for different datasets, e.g.
"voc", "imagenet_det", etc. Defaults to None.
logger (logging.Logger | str | None): The way to print the mAP
summary. See `mmengine.logging.print_log()` for details.
Defaults to None.
tpfp_fn (callable | None): The function used to determine true/
false positives. If None, :func:`tpfp_default` is used as default
unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this
case). If it is given as a function, then this function is used
to evaluate tp & fp. Default None.
nproc (int): Processes used for computing TP and FP.
Defaults to 4.
use_legacy_coordinate (bool): Whether to use coordinate system in
mmdet v1.x, which means width and height should be
calculated as `x2 - x1 + 1` and `y2 - y1 + 1` respectively.
Defaults to False.
use_group_of (bool): Whether to use group-of boxes when calculating TP
and FP, which is only used in OpenImages evaluation. Defaults to False.
eval_mode (str): 'area' or '11points', 'area' means calculating the
area under precision-recall curve, '11points' means calculating
the average precision of recalls at [0, 0.1, ..., 1],
PASCAL VOC2007 uses `11points` as its default evaluation mode, while
others use 'area'. Defaults to 'area'.
Returns:
tuple: (mAP, [dict, dict, ...])
"""
assert len(det_results) == len(annotations)
assert eval_mode in ['area', '11points'], \
f'Unrecognized {eval_mode} mode, only "area" and "11points" ' \
'are supported'
if not use_legacy_coordinate:
extra_length = 0.
else:
extra_length = 1.
num_imgs = len(det_results)
num_scales = len(scale_ranges) if scale_ranges is not None else 1
num_classes = len(det_results[0]) # positive class num
area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]
if scale_ranges is not None else None)
# There is no need to use multiple processes when num_imgs = 1.
if num_imgs > 1:
assert nproc > 0, 'nproc must be at least one.'
nproc = min(nproc, num_imgs)
pool = Pool(nproc)
eval_results = []
for i in range(num_classes):
# get gt and det bboxes of this class
cls_dets, cls_gts, cls_gts_ignore = get_cls_results(
det_results, annotations, i)
# choose proper function according to datasets to compute tp and fp
if tpfp_fn is None:
if dataset in ['det', 'vid']:
tpfp_fn = tpfp_imagenet
elif dataset in ['oid_challenge', 'oid_v6'] \
or use_group_of is True:
tpfp_fn = tpfp_openimages
else:
tpfp_fn = tpfp_default
if not callable(tpfp_fn):
raise ValueError(
f'tpfp_fn has to be a function or None, but got {tpfp_fn}')
if num_imgs > 1:
# compute tp and fp for each image with multiple processes
args = []
if use_group_of:
# used in Open Images Dataset evaluation
gt_group_ofs = get_cls_group_ofs(annotations, i)
args.append(gt_group_ofs)
args.append([use_group_of for _ in range(num_imgs)])
if ioa_thr is not None:
args.append([ioa_thr for _ in range(num_imgs)])
tpfp = pool.starmap(
tpfp_fn,
zip(cls_dets, cls_gts, cls_gts_ignore,
[iou_thr for _ in range(num_imgs)],
[area_ranges for _ in range(num_imgs)],
[use_legacy_coordinate for _ in range(num_imgs)], *args))
else:
tpfp = tpfp_fn(
cls_dets[0],
cls_gts[0],
cls_gts_ignore[0],
iou_thr,
area_ranges,
use_legacy_coordinate,
gt_bboxes_group_of=(get_cls_group_ofs(annotations, i)[0]
if use_group_of else None),
use_group_of=use_group_of,
ioa_thr=ioa_thr)
tpfp = [tpfp]
if use_group_of:
tp, fp, cls_dets = tuple(zip(*tpfp))
else:
tp, fp = tuple(zip(*tpfp))
# calculate gt number of each scale
# ignored gts or gts beyond the specific scale are not counted
num_gts = np.zeros(num_scales, dtype=int)
for j, bbox in enumerate(cls_gts):
if area_ranges is None:
num_gts[0] += bbox.shape[0]
else:
gt_areas = (bbox[:, 2] - bbox[:, 0] + extra_length) * (
bbox[:, 3] - bbox[:, 1] + extra_length)
for k, (min_area, max_area) in enumerate(area_ranges):
num_gts[k] += np.sum((gt_areas >= min_area)
& (gt_areas < max_area))
# sort all det bboxes by score, also sort tp and fp
cls_dets = np.vstack(cls_dets)
num_dets = cls_dets.shape[0]
sort_inds = np.argsort(-cls_dets[:, -1])
tp = np.hstack(tp)[:, sort_inds]
fp = np.hstack(fp)[:, sort_inds]
# calculate recall and precision with tp and fp
tp = np.cumsum(tp, axis=1)
fp = np.cumsum(fp, axis=1)
eps = np.finfo(np.float32).eps
recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)
precisions = tp / np.maximum((tp + fp), eps)
# calculate AP
if scale_ranges is None:
recalls = recalls[0, :]
precisions = precisions[0, :]
num_gts = num_gts.item()
ap = average_precision(recalls, precisions, eval_mode)
eval_results.append({
'num_gts': num_gts,
'num_dets': num_dets,
'recall': recalls,
'precision': precisions,
'ap': ap
})
if num_imgs > 1:
pool.close()
if scale_ranges is not None:
# shape (num_classes, num_scales)
all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
all_num_gts = np.vstack(
[cls_result['num_gts'] for cls_result in eval_results])
mean_ap = []
for i in range(num_scales):
if np.any(all_num_gts[:, i] > 0):
mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())
else:
mean_ap.append(0.0)
else:
aps = []
for cls_result in eval_results:
if cls_result['num_gts'] > 0:
aps.append(cls_result['ap'])
mean_ap = np.array(aps).mean().item() if aps else 0.0
print_map_summary(
mean_ap, eval_results, dataset, area_ranges, logger=logger)
return mean_ap, eval_results
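# A hedged end-to-end sketch (toy data, illustrative only): one image, one
# class and a single detection that exactly matches the gt box yields
# mAP = 1.0.
#
#   >>> det_results = [[np.array([[0., 0., 10., 10., 0.9]])]]
#   >>> annotations = [dict(bboxes=np.array([[0., 0., 10., 10.]]),
#   ...                     labels=np.array([0]))]
#   >>> mean_ap, _ = eval_map(det_results, annotations, logger='silent')
#   >>> mean_ap   # 1.0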
def print_map_summary(mean_ap,
results,
dataset=None,
scale_ranges=None,
logger=None):
"""Print mAP and results of each class.
A table will be printed to show the gts/dets/recall/AP of each class and
the mAP.
Args:
mean_ap (float): Calculated from `eval_map()`.
results (list[dict]): Calculated from `eval_map()`.
dataset (list[str] | str | None): Dataset name or dataset classes.
scale_ranges (list[tuple] | None): Range of scales to be evaluated.
logger (logging.Logger | str | None): The way to print the mAP
summary. See `mmengine.logging.print_log()` for details.
Defaults to None.
"""
if logger == 'silent':
return
if isinstance(results[0]['ap'], np.ndarray):
num_scales = len(results[0]['ap'])
else:
num_scales = 1
if scale_ranges is not None:
assert len(scale_ranges) == num_scales
num_classes = len(results)
recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
aps = np.zeros((num_scales, num_classes), dtype=np.float32)
num_gts = np.zeros((num_scales, num_classes), dtype=int)
for i, cls_result in enumerate(results):
if cls_result['recall'].size > 0:
recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
aps[:, i] = cls_result['ap']
num_gts[:, i] = cls_result['num_gts']
if dataset is None:
label_names = [str(i) for i in range(num_classes)]
elif is_str(dataset):
label_names = get_classes(dataset)
else:
label_names = dataset
if not isinstance(mean_ap, list):
mean_ap = [mean_ap]
header = ['class', 'gts', 'dets', 'recall', 'ap']
for i in range(num_scales):
if scale_ranges is not None:
print_log(f'Scale range {scale_ranges[i]}', logger=logger)
table_data = [header]
for j in range(num_classes):
row_data = [
label_names[j], num_gts[i, j], results[j]['num_dets'],
f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'
]
table_data.append(row_data)
table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])
table = AsciiTable(table_data)
table.inner_footing_row_border = True
print_log('\n' + table.table, logger=logger)
| 33,271 | 40.957125 | 79 |
py
|
ERD
|
ERD-main/mmdet/testing/_utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from os.path import dirname, exists, join
import numpy as np
import torch
from mmengine.config import Config
from mmengine.dataset import pseudo_collate
from mmengine.structures import InstanceData, PixelData
from ..registry import TASK_UTILS
from ..structures import DetDataSample
from ..structures.bbox import HorizontalBoxes
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
def get_roi_head_cfg(fname):
"""Grab configs necessary to create a roi_head.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
roi_head = model.roi_head
train_cfg = None if model.train_cfg is None else model.train_cfg.rcnn
test_cfg = None if model.test_cfg is None else model.test_cfg.rcnn
roi_head.update(dict(train_cfg=train_cfg, test_cfg=test_cfg))
return roi_head
def _rand_bboxes(rng, num_boxes, w, h):
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
def _rand_masks(rng, num_boxes, bboxes, img_w, img_h):
from mmdet.structures.mask import BitmapMasks
masks = np.zeros((num_boxes, img_h, img_w))
for i, bbox in enumerate(bboxes):
bbox = bbox.astype(np.int32)
mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >
0.3).astype(np.int64)
masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask
return BitmapMasks(masks, height=img_h, width=img_w)
def demo_mm_inputs(batch_size=2,
image_shapes=(3, 128, 128),
num_items=None,
num_classes=10,
sem_seg_output_strides=1,
with_mask=False,
with_semantic=False,
use_box_type=False,
device='cpu'):
"""Create a superset of inputs needed to run test or train batches.
Args:
batch_size (int): batch size. Defaults to 2.
image_shapes (List[tuple], Optional): image shape.
Defaults to (3, 128, 128)
num_items (None | List[int]): specifies the number
of boxes in each batch item. Default to None.
num_classes (int): number of different labels a
box might have. Defaults to 10.
with_mask (bool): Whether to return mask annotation.
Defaults to False.
with_semantic (bool): whether to return semantic.
Defaults to False.
device (str): Destination device type. Defaults to cpu.
"""
rng = np.random.RandomState(0)
if isinstance(image_shapes, list):
assert len(image_shapes) == batch_size
else:
image_shapes = [image_shapes] * batch_size
if isinstance(num_items, list):
assert len(num_items) == batch_size
packed_inputs = []
for idx in range(batch_size):
image_shape = image_shapes[idx]
c, h, w = image_shape
image = rng.randint(0, 255, size=image_shape, dtype=np.uint8)
mm_inputs = dict()
mm_inputs['inputs'] = torch.from_numpy(image).to(device)
img_meta = {
'img_id': idx,
'img_shape': image_shape[1:],
'ori_shape': image_shape[1:],
'filename': '<demo>.png',
'scale_factor': np.array([1.1, 1.2]),
'flip': False,
'flip_direction': None,
'border': [1, 1, 1, 1] # Only used by CenterNet
}
data_sample = DetDataSample()
data_sample.set_metainfo(img_meta)
# gt_instances
gt_instances = InstanceData()
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[idx]
bboxes = _rand_bboxes(rng, num_boxes, w, h)
labels = rng.randint(1, num_classes, size=num_boxes)
# TODO: remove this part when all model adapted with BaseBoxes
if use_box_type:
gt_instances.bboxes = HorizontalBoxes(bboxes, dtype=torch.float32)
else:
gt_instances.bboxes = torch.FloatTensor(bboxes)
gt_instances.labels = torch.LongTensor(labels)
if with_mask:
masks = _rand_masks(rng, num_boxes, bboxes, w, h)
gt_instances.masks = masks
# TODO: waiting for ci to be fixed
# masks = np.random.randint(0, 2, (len(bboxes), h, w), dtype=np.uint8)
# gt_instances.mask = BitmapMasks(masks, h, w)
data_sample.gt_instances = gt_instances
# ignore_instances
ignore_instances = InstanceData()
bboxes = _rand_bboxes(rng, num_boxes, w, h)
if use_box_type:
ignore_instances.bboxes = HorizontalBoxes(
bboxes, dtype=torch.float32)
else:
ignore_instances.bboxes = torch.FloatTensor(bboxes)
data_sample.ignored_instances = ignore_instances
# gt_sem_seg
if with_semantic:
# assume gt_semantic_seg using scale 1/8 of the img
gt_semantic_seg = torch.from_numpy(
np.random.randint(
0,
num_classes, (1, h // sem_seg_output_strides,
w // sem_seg_output_strides),
dtype=np.uint8))
gt_sem_seg_data = dict(sem_seg=gt_semantic_seg)
data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)
mm_inputs['data_samples'] = data_sample.to(device)
# TODO: gt_ignore
packed_inputs.append(mm_inputs)
data = pseudo_collate(packed_inputs)
return data
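# A hedged usage sketch (illustrative): build a fake batch of two 3x128x128
# images with random gt boxes and labels; the result can be fed to a
# detector after its data preprocessor.
#
#   >>> data = demo_mm_inputs(batch_size=2, image_shapes=(3, 128, 128))
#   >>> len(data['inputs']), len(data['data_samples'])   # (2, 2)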
def demo_mm_proposals(image_shapes, num_proposals, device='cpu'):
"""Create a list of fake porposals.
Args:
image_shapes (list[tuple[int]]): Batch image shapes.
num_proposals (int): The number of fake proposals.
"""
rng = np.random.RandomState(0)
results = []
for img_shape in image_shapes:
result = InstanceData()
w, h = img_shape[1:]
proposals = _rand_bboxes(rng, num_proposals, w, h)
result.bboxes = torch.from_numpy(proposals).float()
result.scores = torch.from_numpy(rng.rand(num_proposals)).float()
result.labels = torch.zeros(num_proposals).long()
results.append(result.to(device))
return results
def demo_mm_sampling_results(proposals_list,
batch_gt_instances,
batch_gt_instances_ignore=None,
assigner_cfg=None,
sampler_cfg=None,
feats=None):
"""Create sample results that can be passed to BBoxHead.get_targets."""
assert len(proposals_list) == len(batch_gt_instances)
if batch_gt_instances_ignore is None:
batch_gt_instances_ignore = [None for _ in batch_gt_instances]
else:
assert len(batch_gt_instances_ignore) == len(batch_gt_instances)
default_assigner_cfg = dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1)
assigner_cfg = assigner_cfg if assigner_cfg is not None \
else default_assigner_cfg
default_sampler_cfg = dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True)
sampler_cfg = sampler_cfg if sampler_cfg is not None \
else default_sampler_cfg
bbox_assigner = TASK_UTILS.build(assigner_cfg)
bbox_sampler = TASK_UTILS.build(sampler_cfg)
sampling_results = []
for i in range(len(batch_gt_instances)):
if feats is not None:
feats = [lvl_feat[i][None] for lvl_feat in feats]
# rename proposals.bboxes to proposals.priors
proposals = proposals_list[i]
proposals.priors = proposals.pop('bboxes')
assign_result = bbox_assigner.assign(proposals, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = bbox_sampler.sample(
assign_result, proposals, batch_gt_instances[i], feats=feats)
sampling_results.append(sampling_result)
return sampling_results
# TODO: Support full ceph
def replace_to_ceph(cfg):
backend_args = dict(
backend='petrel',
path_mapping=dict({
'./data/': 's3://openmmlab/datasets/detection/',
'data/': 's3://openmmlab/datasets/detection/'
}))
# TODO: name is a reserved interface, which will be used later.
def _process_pipeline(dataset, name):
def replace_img(pipeline):
if pipeline['type'] == 'LoadImageFromFile':
pipeline['backend_args'] = backend_args
def replace_ann(pipeline):
if pipeline['type'] == 'LoadAnnotations' or pipeline[
'type'] == 'LoadPanopticAnnotations':
pipeline['backend_args'] = backend_args
if 'pipeline' in dataset:
replace_img(dataset.pipeline[0])
replace_ann(dataset.pipeline[1])
if 'dataset' in dataset:
# dataset wrapper
replace_img(dataset.dataset.pipeline[0])
replace_ann(dataset.dataset.pipeline[1])
else:
# dataset wrapper
replace_img(dataset.dataset.pipeline[0])
replace_ann(dataset.dataset.pipeline[1])
def _process_evaluator(evaluator, name):
if evaluator['type'] == 'CocoPanopticMetric':
evaluator['backend_args'] = backend_args
# half ceph
_process_pipeline(cfg.train_dataloader.dataset, cfg.filename)
_process_pipeline(cfg.val_dataloader.dataset, cfg.filename)
_process_pipeline(cfg.test_dataloader.dataset, cfg.filename)
_process_evaluator(cfg.val_evaluator, cfg.filename)
_process_evaluator(cfg.test_evaluator, cfg.filename)
| 11,178 | 34.154088 | 78 |
py
|
ERD
|
ERD-main/mmdet/testing/_fast_stop_training_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class FastStopTrainingHook(Hook):
"""Set runner's epoch information to the model."""
def __init__(self, by_epoch, save_ckpt=False, stop_iter_or_epoch=5):
self.by_epoch = by_epoch
self.save_ckpt = save_ckpt
self.stop_iter_or_epoch = stop_iter_or_epoch
def after_train_iter(self, runner, batch_idx: int, data_batch: None,
outputs: None) -> None:
if self.save_ckpt and self.by_epoch:
# If it is epoch-based and want to save weights,
# we must run at least 1 epoch.
return
if runner.iter >= self.stop_iter_or_epoch:
raise RuntimeError('quick exit')
def after_train_epoch(self, runner) -> None:
if runner.epoch >= self.stop_iter_or_epoch - 1:
raise RuntimeError('quick exit')
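# A hedged config example (illustrative only) of registering this hook in a
# test config; training then aborts with RuntimeError('quick exit') once the
# iteration or epoch counter reaches ``stop_iter_or_epoch``:
#
#   custom_hooks = [
#       dict(type='FastStopTrainingHook', by_epoch=False, stop_iter_or_epoch=5)
#   ]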
| 964 | 33.464286 | 72 |
py
|
ERD
|
ERD-main/mmdet/testing/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._fast_stop_training_hook import FastStopTrainingHook # noqa: F401,F403
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, get_detector_cfg,
get_roi_head_cfg, replace_to_ceph)
__all__ = [
'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg',
'demo_mm_proposals', 'demo_mm_sampling_results', 'replace_to_ceph'
]
| 451 | 40.090909 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
| 541 | 40.692308 | 52 |
py
|
ERD
|
ERD-main/mmdet/models/data_preprocessors/data_preprocessor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
from numbers import Number
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.dist import barrier, broadcast, get_dist_info
from mmengine.logging import MessageHub
from mmengine.model import BaseDataPreprocessor, ImgDataPreprocessor
from mmengine.structures import PixelData
from mmengine.utils import is_seq_of
from torch import Tensor
from mmdet.models.utils import unfold_wo_center
from mmdet.models.utils.misc import samplelist_boxtype2tensor
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
from mmdet.utils import ConfigType
try:
import skimage
except ImportError:
skimage = None
@MODELS.register_module()
class DetDataPreprocessor(ImgDataPreprocessor):
"""Image pre-processor for detection tasks.
Comparing with the :class:`mmengine.ImgDataPreprocessor`,
1. It supports batch augmentations.
2. It will additionally append batch_input_shape and pad_shape
to data_samples considering the object detection task.
It provides the data pre-processing as follows
- Collate and move data to the target device.
- Pad inputs to the maximum size of the current batch with the defined
``pad_value``, making the padded size divisible by the defined
``pad_size_divisor``.
- Stack inputs to batch_inputs.
- Convert inputs from bgr to rgb if the shape of input is (3, H, W).
- Normalize image with defined std and mean.
- Do batch augmentations during training.
Args:
mean (Sequence[Number], optional): The pixel mean of R, G, B channels.
Defaults to None.
std (Sequence[Number], optional): The pixel standard deviation of
R, G, B channels. Defaults to None.
pad_size_divisor (int): The size of padded image should be
divisible by ``pad_size_divisor``. Defaults to 1.
pad_value (Number): The padded pixel value. Defaults to 0.
pad_mask (bool): Whether to pad instance masks. Defaults to False.
mask_pad_value (int): The padded pixel value for instance masks.
Defaults to 0.
pad_seg (bool): Whether to pad semantic segmentation maps.
Defaults to False.
seg_pad_value (int): The padded pixel value for semantic
segmentation maps. Defaults to 255.
bgr_to_rgb (bool): whether to convert image from BGR to RGB.
Defaults to False.
rgb_to_bgr (bool): whether to convert image from RGB to BGR.
Defaults to False.
boxtype2tensor (bool): Whether to keep the ``BaseBoxes`` type of
bboxes data or not. Defaults to True.
non_blocking (bool): Whether block current process
when transferring data to device. Defaults to False.
batch_augments (list[dict], optional): Batch-level augmentations
"""
def __init__(self,
mean: Sequence[Number] = None,
std: Sequence[Number] = None,
pad_size_divisor: int = 1,
pad_value: Union[float, int] = 0,
pad_mask: bool = False,
mask_pad_value: int = 0,
pad_seg: bool = False,
seg_pad_value: int = 255,
bgr_to_rgb: bool = False,
rgb_to_bgr: bool = False,
boxtype2tensor: bool = True,
non_blocking: Optional[bool] = False,
batch_augments: Optional[List[dict]] = None):
super().__init__(
mean=mean,
std=std,
pad_size_divisor=pad_size_divisor,
pad_value=pad_value,
bgr_to_rgb=bgr_to_rgb,
rgb_to_bgr=rgb_to_bgr,
non_blocking=non_blocking)
if batch_augments is not None:
self.batch_augments = nn.ModuleList(
[MODELS.build(aug) for aug in batch_augments])
else:
self.batch_augments = None
self.pad_mask = pad_mask
self.mask_pad_value = mask_pad_value
self.pad_seg = pad_seg
self.seg_pad_value = seg_pad_value
self.boxtype2tensor = boxtype2tensor
def forward(self, data: dict, training: bool = False) -> dict:
"""Perform normalization、padding and bgr2rgb conversion based on
``BaseDataPreprocessor``.
Args:
data (dict): Data sampled from dataloader.
training (bool): Whether to enable training time augmentation.
Returns:
dict: Data in the same format as the model input.
"""
batch_pad_shape = self._get_pad_shape(data)
data = super().forward(data=data, training=training)
inputs, data_samples = data['inputs'], data['data_samples']
if data_samples is not None:
# NOTE the batched image size information may be useful, e.g.
# in DETR, this is needed for the construction of masks, which is
# then used for the transformer_head.
batch_input_shape = tuple(inputs[0].size()[-2:])
for data_sample, pad_shape in zip(data_samples, batch_pad_shape):
data_sample.set_metainfo({
'batch_input_shape': batch_input_shape,
'pad_shape': pad_shape
})
if self.boxtype2tensor:
samplelist_boxtype2tensor(data_samples)
if self.pad_mask and training:
self.pad_gt_masks(data_samples)
if self.pad_seg and training:
self.pad_gt_sem_seg(data_samples)
if training and self.batch_augments is not None:
for batch_aug in self.batch_augments:
inputs, data_samples = batch_aug(inputs, data_samples)
return {'inputs': inputs, 'data_samples': data_samples}
def _get_pad_shape(self, data: dict) -> List[tuple]:
"""Get the pad_shape of each image based on data and
pad_size_divisor."""
_batch_inputs = data['inputs']
# Process data with `pseudo_collate`.
if is_seq_of(_batch_inputs, torch.Tensor):
batch_pad_shape = []
for ori_input in _batch_inputs:
pad_h = int(
np.ceil(ori_input.shape[1] /
self.pad_size_divisor)) * self.pad_size_divisor
pad_w = int(
np.ceil(ori_input.shape[2] /
self.pad_size_divisor)) * self.pad_size_divisor
batch_pad_shape.append((pad_h, pad_w))
# Process data with `default_collate`.
elif isinstance(_batch_inputs, torch.Tensor):
assert _batch_inputs.dim() == 4, (
'The input of `ImgDataPreprocessor` should be a NCHW tensor '
'or a list of tensor, but got a tensor with shape: '
f'{_batch_inputs.shape}')
pad_h = int(
np.ceil(_batch_inputs.shape[1] /
self.pad_size_divisor)) * self.pad_size_divisor
pad_w = int(
np.ceil(_batch_inputs.shape[2] /
self.pad_size_divisor)) * self.pad_size_divisor
batch_pad_shape = [(pad_h, pad_w)] * _batch_inputs.shape[0]
else:
raise TypeError('Output of `cast_data` should be a dict '
'or a tuple with inputs and data_samples, but got '
f'{type(data)}: {data}')
return batch_pad_shape
def pad_gt_masks(self,
batch_data_samples: Sequence[DetDataSample]) -> None:
"""Pad gt_masks to shape of batch_input_shape."""
if 'masks' in batch_data_samples[0].gt_instances:
for data_samples in batch_data_samples:
masks = data_samples.gt_instances.masks
data_samples.gt_instances.masks = masks.pad(
data_samples.batch_input_shape,
pad_val=self.mask_pad_value)
def pad_gt_sem_seg(self,
batch_data_samples: Sequence[DetDataSample]) -> None:
"""Pad gt_sem_seg to shape of batch_input_shape."""
if 'gt_sem_seg' in batch_data_samples[0]:
for data_samples in batch_data_samples:
gt_sem_seg = data_samples.gt_sem_seg.sem_seg
h, w = gt_sem_seg.shape[-2:]
pad_h, pad_w = data_samples.batch_input_shape
gt_sem_seg = F.pad(
gt_sem_seg,
pad=(0, max(pad_w - w, 0), 0, max(pad_h - h, 0)),
mode='constant',
value=self.seg_pad_value)
data_samples.gt_sem_seg = PixelData(sem_seg=gt_sem_seg)
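# A hedged configuration sketch for ``DetDataPreprocessor`` (the mean/std
# values are the commonly used ImageNet statistics, shown for illustration
# only, not mandated by this module):
#
#   data_preprocessor = dict(
#       type='DetDataPreprocessor',
#       mean=[123.675, 116.28, 103.53],
#       std=[58.395, 57.12, 57.375],
#       bgr_to_rgb=True,
#       pad_size_divisor=32)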
@MODELS.register_module()
class BatchSyncRandomResize(nn.Module):
"""Batch random resize which synchronizes the random size across ranks.
Args:
random_size_range (tuple): The multi-scale random range during
multi-scale training.
interval (int): The iter interval of change
image size. Defaults to 10.
size_divisor (int): Image size divisible factor.
Defaults to 32.
"""
def __init__(self,
random_size_range: Tuple[int, int],
interval: int = 10,
size_divisor: int = 32) -> None:
super().__init__()
self.rank, self.world_size = get_dist_info()
self._input_size = None
self._random_size_range = (round(random_size_range[0] / size_divisor),
round(random_size_range[1] / size_divisor))
self._interval = interval
self._size_divisor = size_divisor
def forward(
self, inputs: Tensor, data_samples: List[DetDataSample]
) -> Tuple[Tensor, List[DetDataSample]]:
"""resize a batch of images and bboxes to shape ``self._input_size``"""
h, w = inputs.shape[-2:]
if self._input_size is None:
self._input_size = (h, w)
scale_y = self._input_size[0] / h
scale_x = self._input_size[1] / w
if scale_x != 1 or scale_y != 1:
inputs = F.interpolate(
inputs,
size=self._input_size,
mode='bilinear',
align_corners=False)
for data_sample in data_samples:
img_shape = (int(data_sample.img_shape[0] * scale_y),
int(data_sample.img_shape[1] * scale_x))
pad_shape = (int(data_sample.pad_shape[0] * scale_y),
int(data_sample.pad_shape[1] * scale_x))
data_sample.set_metainfo({
'img_shape': img_shape,
'pad_shape': pad_shape,
'batch_input_shape': self._input_size
})
data_sample.gt_instances.bboxes[
...,
0::2] = data_sample.gt_instances.bboxes[...,
0::2] * scale_x
data_sample.gt_instances.bboxes[
...,
1::2] = data_sample.gt_instances.bboxes[...,
1::2] * scale_y
if 'ignored_instances' in data_sample:
data_sample.ignored_instances.bboxes[
..., 0::2] = data_sample.ignored_instances.bboxes[
..., 0::2] * scale_x
data_sample.ignored_instances.bboxes[
..., 1::2] = data_sample.ignored_instances.bboxes[
..., 1::2] * scale_y
message_hub = MessageHub.get_current_instance()
if (message_hub.get_info('iter') + 1) % self._interval == 0:
self._input_size = self._get_random_size(
aspect_ratio=float(w / h), device=inputs.device)
return inputs, data_samples
def _get_random_size(self, aspect_ratio: float,
device: torch.device) -> Tuple[int, int]:
"""Randomly generate a shape in ``_random_size_range`` and broadcast to
all ranks."""
tensor = torch.LongTensor(2).to(device)
if self.rank == 0:
size = random.randint(*self._random_size_range)
size = (self._size_divisor * size,
self._size_divisor * int(aspect_ratio * size))
tensor[0] = size[0]
tensor[1] = size[1]
barrier()
broadcast(tensor, 0)
input_size = (tensor[0].item(), tensor[1].item())
return input_size
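# A hedged usage note (illustrative config): this module is typically plugged
# into the data preprocessor as a batch-level augmentation, e.g.
#
#   batch_augments = [
#       dict(type='BatchSyncRandomResize', random_size_range=(480, 800))
#   ]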
@MODELS.register_module()
class BatchFixedSizePad(nn.Module):
"""Fixed size padding for batch images.
Args:
size (Tuple[int, int]): Fixed padding size. Expected padding
shape (h, w). Defaults to None.
img_pad_value (int): The padded pixel value for images.
Defaults to 0.
pad_mask (bool): Whether to pad instance masks. Defaults to False.
mask_pad_value (int): The padded pixel value for instance masks.
Defaults to 0.
pad_seg (bool): Whether to pad semantic segmentation maps.
Defaults to False.
seg_pad_value (int): The padded pixel value for semantic
segmentation maps. Defaults to 255.
"""
def __init__(self,
size: Tuple[int, int],
img_pad_value: int = 0,
pad_mask: bool = False,
mask_pad_value: int = 0,
pad_seg: bool = False,
seg_pad_value: int = 255) -> None:
super().__init__()
self.size = size
self.pad_mask = pad_mask
self.pad_seg = pad_seg
self.img_pad_value = img_pad_value
self.mask_pad_value = mask_pad_value
self.seg_pad_value = seg_pad_value
def forward(
self,
inputs: Tensor,
data_samples: Optional[List[dict]] = None
) -> Tuple[Tensor, Optional[List[dict]]]:
"""Pad image, instance masks, segmantic segmentation maps."""
src_h, src_w = inputs.shape[-2:]
dst_h, dst_w = self.size
if src_h >= dst_h and src_w >= dst_w:
return inputs, data_samples
inputs = F.pad(
inputs,
pad=(0, max(0, dst_w - src_w), 0, max(0, dst_h - src_h)),
mode='constant',
value=self.img_pad_value)
if data_samples is not None:
# update batch_input_shape
for data_sample in data_samples:
data_sample.set_metainfo({
'batch_input_shape': (dst_h, dst_w),
'pad_shape': (dst_h, dst_w)
})
if self.pad_mask:
for data_sample in data_samples:
masks = data_sample.gt_instances.masks
data_sample.gt_instances.masks = masks.pad(
(dst_h, dst_w), pad_val=self.mask_pad_value)
if self.pad_seg:
for data_sample in data_samples:
gt_sem_seg = data_sample.gt_sem_seg.sem_seg
h, w = gt_sem_seg.shape[-2:]
gt_sem_seg = F.pad(
gt_sem_seg,
pad=(0, max(0, dst_w - w), 0, max(0, dst_h - h)),
mode='constant',
value=self.seg_pad_value)
data_sample.gt_sem_seg = PixelData(sem_seg=gt_sem_seg)
return inputs, data_samples
@MODELS.register_module()
class MultiBranchDataPreprocessor(BaseDataPreprocessor):
"""DataPreprocessor wrapper for multi-branch data.
Take semi-supervised object detection as an example, assume that
the ratio of labeled data and unlabeled data in a batch is 1:2,
`sup` indicates the branch where the labeled data is augmented,
`unsup_teacher` and `unsup_student` indicate the branches where
the unlabeled data is augmented by different pipeline.
The input format of multi-branch data is shown as below :
.. code-block:: none
{
'inputs':
{
'sup': [Tensor, None, None],
'unsup_teacher': [None, Tensor, Tensor],
'unsup_student': [None, Tensor, Tensor],
},
'data_sample':
{
'sup': [DetDataSample, None, None],
'unsup_teacher': [None, DetDataSample, DetDataSample],
'unsup_student': [None, DetDataSample, DetDataSample],
}
}
The format of multi-branch data
after filtering None is shown as below :
.. code-block:: none
{
'inputs':
{
'sup': [Tensor],
'unsup_teacher': [Tensor, Tensor],
'unsup_student': [Tensor, Tensor],
},
'data_sample':
{
'sup': [DetDataSample],
'unsup_teacher': [DetDataSample, DetDataSample],
'unsup_student': [DetDataSample, DetDataSample],
}
}
In order to reuse `DetDataPreprocessor` for the data
from different branches, the format of multi-branch data
grouped by branch is as below :
.. code-block:: none
{
'sup':
{
'inputs': [Tensor]
'data_sample': [DetDataSample, DetDataSample]
},
'unsup_teacher':
{
'inputs': [Tensor, Tensor]
'data_sample': [DetDataSample, DetDataSample]
},
'unsup_student':
{
'inputs': [Tensor, Tensor]
'data_sample': [DetDataSample, DetDataSample]
},
}
After preprocessing data from different branches,
the multi-branch data needs to be reformatted as:
.. code-block:: none
{
'inputs':
{
'sup': [Tensor],
'unsup_teacher': [Tensor, Tensor],
'unsup_student': [Tensor, Tensor],
},
'data_sample':
{
'sup': [DetDataSample],
'unsup_teacher': [DetDataSample, DetDataSample],
'unsup_student': [DetDataSample, DetDataSample],
}
}
Args:
data_preprocessor (:obj:`ConfigDict` or dict): Config of
:class:`DetDataPreprocessor` to process the input data.
"""
def __init__(self, data_preprocessor: ConfigType) -> None:
super().__init__()
self.data_preprocessor = MODELS.build(data_preprocessor)
def forward(self, data: dict, training: bool = False) -> dict:
"""Perform normalization、padding and bgr2rgb conversion based on
``BaseDataPreprocessor`` for multi-branch data.
Args:
data (dict): Data sampled from dataloader.
training (bool): Whether to enable training time augmentation.
Returns:
dict:
- 'inputs' (Dict[str, obj:`torch.Tensor`]): The forward data of
models from different branches.
- 'data_sample' (Dict[str, obj:`DetDataSample`]): The annotation
info of the sample from different branches.
"""
if training is False:
return self.data_preprocessor(data, training)
# Filter out branches with a value of None
for key in data.keys():
for branch in data[key].keys():
data[key][branch] = list(
filter(lambda x: x is not None, data[key][branch]))
# Group data by branch
multi_branch_data = {}
for key in data.keys():
for branch in data[key].keys():
if multi_branch_data.get(branch, None) is None:
multi_branch_data[branch] = {key: data[key][branch]}
elif multi_branch_data[branch].get(key, None) is None:
multi_branch_data[branch][key] = data[key][branch]
else:
multi_branch_data[branch][key].append(data[key][branch])
# Preprocess data from different branches
for branch, _data in multi_branch_data.items():
multi_branch_data[branch] = self.data_preprocessor(_data, training)
# Format data by inputs and data_samples
format_data = {}
for branch in multi_branch_data.keys():
for key in multi_branch_data[branch].keys():
if format_data.get(key, None) is None:
format_data[key] = {branch: multi_branch_data[branch][key]}
elif format_data[key].get(branch, None) is None:
format_data[key][branch] = multi_branch_data[branch][key]
else:
format_data[key][branch].append(
multi_branch_data[branch][key])
return format_data
@property
def device(self):
return self.data_preprocessor.device
def to(self, device: Optional[Union[int, torch.device]], *args,
**kwargs) -> nn.Module:
"""Overrides this method to set the :attr:`device`
Args:
device (int or torch.device, optional): The desired device of the
parameters and buffers in this module.
Returns:
nn.Module: The model itself.
"""
return self.data_preprocessor.to(device, *args, **kwargs)
def cuda(self, *args, **kwargs) -> nn.Module:
"""Overrides this method to set the :attr:`device`
Returns:
nn.Module: The model itself.
"""
return self.data_preprocessor.cuda(*args, **kwargs)
def cpu(self, *args, **kwargs) -> nn.Module:
"""Overrides this method to set the :attr:`device`
Returns:
nn.Module: The model itself.
"""
return self.data_preprocessor.cpu(*args, **kwargs)
@MODELS.register_module()
class BatchResize(nn.Module):
"""Batch resize during training. This implementation is modified from
https://github.com/Purkialo/CrowdDet/blob/master/lib/data/CrowdHuman.py.
It provides the data pre-processing as follows:
- A batch of images is first padded to a uniform size and stacked into
a torch.Tensor by `DetDataPreprocessor`.
- `BatchResize` resizes all images to the target size.
- Pad the images so that the image size is divisible by
``pad_size_divisor``.
Args:
scale (tuple): Images scales for resizing.
pad_size_divisor (int): Image size divisible factor.
Defaults to 1.
pad_value (Number): The padded pixel value. Defaults to 0.
"""
def __init__(
self,
scale: tuple,
pad_size_divisor: int = 1,
pad_value: Union[float, int] = 0,
) -> None:
super().__init__()
self.min_size = min(scale)
self.max_size = max(scale)
self.pad_size_divisor = pad_size_divisor
self.pad_value = pad_value
def forward(
self, inputs: Tensor, data_samples: List[DetDataSample]
) -> Tuple[Tensor, List[DetDataSample]]:
"""resize a batch of images and bboxes."""
batch_height, batch_width = inputs.shape[-2:]
target_height, target_width, scale = self.get_target_size(
batch_height, batch_width)
inputs = F.interpolate(
inputs,
size=(target_height, target_width),
mode='bilinear',
align_corners=False)
inputs = self.get_padded_tensor(inputs, self.pad_value)
if data_samples is not None:
batch_input_shape = tuple(inputs.size()[-2:])
for data_sample in data_samples:
img_shape = [
int(scale * _) for _ in list(data_sample.img_shape)
]
data_sample.set_metainfo({
'img_shape': tuple(img_shape),
'batch_input_shape': batch_input_shape,
'pad_shape': batch_input_shape,
'scale_factor': (scale, scale)
})
data_sample.gt_instances.bboxes *= scale
data_sample.ignored_instances.bboxes *= scale
return inputs, data_samples
def get_target_size(self, height: int,
width: int) -> Tuple[int, int, float]:
"""Get the target size of a batch of images based on data and scale."""
im_size_min = np.min([height, width])
im_size_max = np.max([height, width])
scale = self.min_size / im_size_min
if scale * im_size_max > self.max_size:
scale = self.max_size / im_size_max
target_height, target_width = int(round(height * scale)), int(
round(width * scale))
return target_height, target_width, scale
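    # Worked example (a sketch, not part of the original code): with
    # scale=(800, 1333) the constructor sets min_size=800, max_size=1333.
    # For a 600x900 batch, scale = 800 / 600 = 1.333 and 1.333 * 900 <= 1333,
    # so the target size is (800, 1200). For a 500x1200 batch,
    # 1.6 * 1200 > 1333, so scale falls back to 1333 / 1200 = 1.11 and the
    # target size becomes (555, 1333).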
def get_padded_tensor(self, tensor: Tensor, pad_value: int) -> Tensor:
"""Pad images according to pad_size_divisor."""
assert tensor.ndim == 4
target_height, target_width = tensor.shape[-2], tensor.shape[-1]
divisor = self.pad_size_divisor
padded_height = (target_height + divisor - 1) // divisor * divisor
padded_width = (target_width + divisor - 1) // divisor * divisor
padded_tensor = torch.ones([
tensor.shape[0], tensor.shape[1], padded_height, padded_width
]) * pad_value
padded_tensor = padded_tensor.type_as(tensor)
padded_tensor[:, :, :target_height, :target_width] = tensor
return padded_tensor
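# Usage sketch (illustrative, not part of the original module); the input
# shape below is an assumption chosen only to show the resize-then-pad flow:
#
#   resize = BatchResize(scale=(800, 1333), pad_size_divisor=32)
#   imgs = torch.rand(2, 3, 600, 900)  # batch already stacked by the
#                                      # data preprocessor
#   imgs, data_samples = resize(imgs, data_samples=None)
#   # imgs.shape == (2, 3, 800, 1216): resized to (800, 1200), then padded so
#   # both spatial dims are divisible by 32 (1200 -> 1216, 800 already is).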
@MODELS.register_module()
class BoxInstDataPreprocessor(DetDataPreprocessor):
"""Pseudo mask pre-processor for BoxInst.
    Compared with :class:`mmdet.DetDataPreprocessor`, it additionally:
    1. Generates pseudo masks from box annotations.
    2. Computes the image color similarity in LAB color space.
Args:
mask_stride (int): The mask output stride in boxinst. Defaults to 4.
pairwise_size (int): The size of neighborhood for each pixel.
Defaults to 3.
pairwise_dilation (int): The dilation of neighborhood for each pixel.
Defaults to 2.
pairwise_color_thresh (float): The thresh of image color similarity.
Defaults to 0.3.
        bottom_pixels_removed (int): The number of pixels removed from the
            bottom of each image to compensate for annotation errors in the
            COCO dataset. Defaults to 10.
"""
def __init__(self,
*arg,
mask_stride: int = 4,
pairwise_size: int = 3,
pairwise_dilation: int = 2,
pairwise_color_thresh: float = 0.3,
bottom_pixels_removed: int = 10,
**kwargs) -> None:
super().__init__(*arg, **kwargs)
self.mask_stride = mask_stride
self.pairwise_size = pairwise_size
self.pairwise_dilation = pairwise_dilation
self.pairwise_color_thresh = pairwise_color_thresh
self.bottom_pixels_removed = bottom_pixels_removed
if skimage is None:
raise RuntimeError('skimage is not installed,\
please install it by: pip install scikit-image')
def get_images_color_similarity(self, inputs: Tensor,
image_masks: Tensor) -> Tensor:
"""Compute the image color similarity in LAB color space."""
assert inputs.dim() == 4
assert inputs.size(0) == 1
unfolded_images = unfold_wo_center(
inputs,
kernel_size=self.pairwise_size,
dilation=self.pairwise_dilation)
diff = inputs[:, :, None] - unfolded_images
similarity = torch.exp(-torch.norm(diff, dim=1) * 0.5)
unfolded_weights = unfold_wo_center(
image_masks[None, None],
kernel_size=self.pairwise_size,
dilation=self.pairwise_dilation)
unfolded_weights = torch.max(unfolded_weights, dim=1)[0]
return similarity * unfolded_weights
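    # Shape sketch (illustrative, not part of the original class): for a
    # single LAB image of shape (1, 3, H, W) and pairwise_size K (default 3),
    # the neighbourhood contains K * K - 1 = 8 offsets, so the returned
    # similarity tensor has shape (1, 8, H, W); ``image_masks`` of shape
    # (H, W) zeroes out similarities that touch padded regions.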
def forward(self, data: dict, training: bool = False) -> dict:
"""Get pseudo mask labels using color similarity."""
det_data = super().forward(data, training)
inputs, data_samples = det_data['inputs'], det_data['data_samples']
if training:
# get image masks and remove bottom pixels
b_img_h, b_img_w = data_samples[0].batch_input_shape
img_masks = []
for i in range(inputs.shape[0]):
img_h, img_w = data_samples[i].img_shape
img_mask = inputs.new_ones((img_h, img_w))
pixels_removed = int(self.bottom_pixels_removed *
float(img_h) / float(b_img_h))
if pixels_removed > 0:
img_mask[-pixels_removed:, :] = 0
pad_w = b_img_w - img_w
pad_h = b_img_h - img_h
img_mask = F.pad(img_mask, (0, pad_w, 0, pad_h), 'constant',
0.)
img_masks.append(img_mask)
img_masks = torch.stack(img_masks, dim=0)
start = int(self.mask_stride // 2)
img_masks = img_masks[:, start::self.mask_stride,
start::self.mask_stride]
# Get origin rgb image for color similarity
ori_imgs = inputs * self.std + self.mean
downsampled_imgs = F.avg_pool2d(
ori_imgs.float(),
kernel_size=self.mask_stride,
stride=self.mask_stride,
padding=0)
# Compute color similarity for pseudo mask generation
for im_i, data_sample in enumerate(data_samples):
# TODO: Support rgb2lab in mmengine?
images_lab = skimage.color.rgb2lab(
downsampled_imgs[im_i].byte().permute(1, 2,
0).cpu().numpy())
images_lab = torch.as_tensor(
images_lab, device=ori_imgs.device, dtype=torch.float32)
images_lab = images_lab.permute(2, 0, 1)[None]
images_color_similarity = self.get_images_color_similarity(
images_lab, img_masks[im_i])
pairwise_mask = (images_color_similarity >=
self.pairwise_color_thresh).float()
per_im_bboxes = data_sample.gt_instances.bboxes
if per_im_bboxes.shape[0] > 0:
per_im_masks = []
for per_box in per_im_bboxes:
mask_full = torch.zeros((b_img_h, b_img_w),
device=self.device).float()
mask_full[int(per_box[1]):int(per_box[3] + 1),
int(per_box[0]):int(per_box[2] + 1)] = 1.0
per_im_masks.append(mask_full)
per_im_masks = torch.stack(per_im_masks, dim=0)
pairwise_masks = torch.cat(
[pairwise_mask for _ in range(per_im_bboxes.shape[0])],
dim=0)
else:
per_im_masks = torch.zeros((0, b_img_h, b_img_w))
pairwise_masks = torch.zeros(
(0, self.pairwise_size**2 - 1, b_img_h, b_img_w))
# TODO: Support BitmapMasks with tensor?
data_sample.gt_instances.masks = BitmapMasks(
per_im_masks.cpu().numpy(), b_img_h, b_img_w)
data_sample.gt_instances.pairwise_masks = pairwise_masks
return {'inputs': inputs, 'data_samples': data_samples}
| 32,074 | 39.396725 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/data_preprocessors/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchResize,
BatchSyncRandomResize, BoxInstDataPreprocessor,
DetDataPreprocessor,
MultiBranchDataPreprocessor)
__all__ = [
'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad',
'MultiBranchDataPreprocessor', 'BatchResize', 'BoxInstDataPreprocessor'
]
| 470 | 41.818182 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/solo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,088 | 33.03125 | 67 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/conditional_detr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict
import torch.nn as nn
from torch import Tensor
from mmdet.registry import MODELS
from ..layers import (ConditionalDetrTransformerDecoder,
DetrTransformerEncoder, SinePositionalEncoding)
from .detr import DETR
@MODELS.register_module()
class ConditionalDETR(DETR):
r"""Implementation of `Conditional DETR for Fast Training Convergence.
<https://arxiv.org/abs/2108.06152>`_.
Code is modified from the `official github repo
<https://github.com/Atten4Vis/ConditionalDETR>`_.
"""
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DetrTransformerEncoder(**self.encoder)
self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
# NOTE The embed_dims is typically passed from the inside out.
# For example in DETR, The embed_dims is passed as
# self_attn -> the first encoder layer -> encoder -> detector.
self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
f'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` and `references` of the decoder output.
- hidden_states (Tensor): Has shape
(num_decoder_layers, bs, num_queries, dim)
- references (Tensor): Has shape
(bs, num_queries, 2)
"""
hidden_states, references = self.decoder(
query=query,
key=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask)
head_inputs_dict = dict(
hidden_states=hidden_states, references=references)
return head_inputs_dict
| 3,029 | 39.4 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/yolox.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOX(SingleStageDetector):
r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021
<https://arxiv.org/abs/2107.08430>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of YOLOX. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of YOLOX. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,758 | 38.977273 | 76 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/fsaf.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 929 | 33.444444 | 70 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/two_stage.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from typing import List, Tuple, Union
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .base import BaseDetector
@MODELS.register_module()
class TwoStageDetector(BaseDetector):
"""Base class for two-stage detectors.
    Two-stage detectors typically consist of a region proposal network and a
    task-specific regression head.
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
rpn_head_num_classes = rpn_head_.get('num_classes', None)
if rpn_head_num_classes is None:
rpn_head_.update(num_classes=1)
else:
if rpn_head_num_classes != 1:
warnings.warn(
'The `num_classes` should be 1 in RPN, but get '
f'{rpn_head_num_classes}, please set '
'rpn_head.num_classes = 1 in your config file.')
rpn_head_.update(num_classes=1)
self.rpn_head = MODELS.build(rpn_head_)
if roi_head is not None:
# update train and test cfg here for now
# TODO: refactor assigner & sampler
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
self.roi_head = MODELS.build(roi_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def _load_from_state_dict(self, state_dict: dict, prefix: str,
local_metadata: dict, strict: bool,
missing_keys: Union[List[str], str],
unexpected_keys: Union[List[str], str],
error_msgs: Union[List[str], str]) -> None:
"""Exchange bbox_head key to rpn_head key when loading single-stage
weights into two-stage model."""
bbox_head_prefix = prefix + '.bbox_head' if prefix else 'bbox_head'
bbox_head_keys = [
k for k in state_dict.keys() if k.startswith(bbox_head_prefix)
]
rpn_head_prefix = prefix + '.rpn_head' if prefix else 'rpn_head'
rpn_head_keys = [
k for k in state_dict.keys() if k.startswith(rpn_head_prefix)
]
if len(bbox_head_keys) != 0 and len(rpn_head_keys) == 0:
for bbox_head_key in bbox_head_keys:
rpn_head_key = rpn_head_prefix + \
bbox_head_key[len(bbox_head_prefix):]
state_dict[rpn_head_key] = state_dict.pop(bbox_head_key)
super()._load_from_state_dict(state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs)
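    # Illustrative example (not part of the original class): loading a
    # single-stage checkpoint into this two-stage model renames its head keys
    # in place before delegating to the parent implementation, e.g. a
    # hypothetical parameter
    #   'bbox_head.conv_cls.weight' -> 'rpn_head.conv_cls.weight'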
@property
def with_rpn(self) -> bool:
"""bool: whether the detector has RPN"""
return hasattr(self, 'rpn_head') and self.rpn_head is not None
@property
def with_roi_head(self) -> bool:
"""bool: whether the detector has a RoI head"""
return hasattr(self, 'roi_head') and self.roi_head is not None
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
Returns:
tuple[Tensor]: Multi-level features that may have
different resolutions.
"""
x = self.backbone(batch_inputs)
if self.with_neck:
x = self.neck(x)
return x
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
Returns:
tuple: A tuple of features from ``rpn_head`` and ``roi_head``
forward.
"""
results = ()
x = self.extract_feat(batch_inputs)
if self.with_rpn:
rpn_results_list = self.rpn_head.predict(
x, batch_data_samples, rescale=False)
else:
assert batch_data_samples[0].get('proposals', None) is not None
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
roi_outs = self.roi_head.forward(x, rpn_results_list,
batch_data_samples)
results = results + (roi_outs, )
return results
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components
"""
x = self.extract_feat(batch_inputs)
losses = dict()
# RPN forward and loss
if self.with_rpn:
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
rpn_data_samples = copy.deepcopy(batch_data_samples)
# set cat_id of gt_labels to 0 in RPN
for data_sample in rpn_data_samples:
data_sample.gt_instances.labels = \
torch.zeros_like(data_sample.gt_instances.labels)
rpn_losses, rpn_results_list = self.rpn_head.loss_and_predict(
x, rpn_data_samples, proposal_cfg=proposal_cfg)
# avoid get same name with roi_head loss
keys = rpn_losses.keys()
for key in list(keys):
if 'loss' in key and 'rpn' not in key:
rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)
losses.update(rpn_losses)
else:
assert batch_data_samples[0].get('proposals', None) is not None
# use pre-defined proposals in InstanceData for the second stage
# to extract ROI features.
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
roi_losses = self.roi_head.loss(x, rpn_results_list,
batch_data_samples)
losses.update(roi_losses)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
list[:obj:`DetDataSample`]: Return the detection results of the
input images. The returns value is DetDataSample,
which usually contain 'pred_instances'. And the
``pred_instances`` usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(batch_inputs)
# If there are no pre-defined proposals, use RPN to get proposals
if batch_data_samples[0].get('proposals', None) is None:
rpn_results_list = self.rpn_head.predict(
x, batch_data_samples, rescale=False)
else:
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
results_list = self.roi_head.predict(
x, rpn_results_list, batch_data_samples, rescale=rescale)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
| 9,942 | 39.75 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/base.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch
from mmengine.model import BaseModel
from torch import Tensor
from mmdet.structures import DetDataSample, OptSampleList, SampleList
from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig
from ..utils import samplelist_boxtype2tensor
ForwardResults = Union[Dict[str, torch.Tensor], List[DetDataSample],
Tuple[torch.Tensor], torch.Tensor]
class BaseDetector(BaseModel, metaclass=ABCMeta):
"""Base class for detectors.
Args:
data_preprocessor (dict or ConfigDict, optional): The pre-process
config of :class:`BaseDataPreprocessor`. it usually includes,
``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.
init_cfg (dict or ConfigDict, optional): the config to control the
initialization. Defaults to None.
"""
def __init__(self,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
@property
def with_neck(self) -> bool:
"""bool: whether the detector has a neck"""
return hasattr(self, 'neck') and self.neck is not None
# TODO: these properties need to be carefully handled
# for both single stage & two stage detectors
@property
def with_shared_head(self) -> bool:
"""bool: whether the detector has a shared head in the RoI Head"""
return hasattr(self, 'roi_head') and self.roi_head.with_shared_head
@property
def with_bbox(self) -> bool:
"""bool: whether the detector has a bbox head"""
return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
or (hasattr(self, 'bbox_head') and self.bbox_head is not None))
@property
def with_mask(self) -> bool:
"""bool: whether the detector has a mask head"""
return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
or (hasattr(self, 'mask_head') and self.mask_head is not None))
def forward(self,
inputs: torch.Tensor,
data_samples: OptSampleList = None,
mode: str = 'tensor') -> ForwardResults:
"""The unified entry for a forward process in both training and test.
The method should accept three modes: "tensor", "predict" and "loss":
- "tensor": Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- "predict": Forward and return the predictions, which are fully
processed to a list of :obj:`DetDataSample`.
- "loss": Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method doesn't handle either back propagation or
parameter update, which are supposed to be done in :meth:`train_step`.
Args:
inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (list[:obj:`DetDataSample`], optional): A batch of
data samples that contain annotations and predictions.
Defaults to None.
mode (str): Return what kind of value. Defaults to 'tensor'.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of :obj:`DetDataSample`.
- If ``mode="loss"``, return a dict of tensor.
"""
if mode == 'loss':
return self.loss(inputs, data_samples)
elif mode == 'predict':
return self.predict(inputs, data_samples)
elif mode == 'tensor':
return self._forward(inputs, data_samples)
else:
raise RuntimeError(f'Invalid mode "{mode}". '
'Only supports loss, predict and tensor mode')
@abstractmethod
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Union[dict, tuple]:
"""Calculate losses from a batch of inputs and data samples."""
pass
@abstractmethod
def predict(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing."""
pass
@abstractmethod
def _forward(self,
batch_inputs: Tensor,
batch_data_samples: OptSampleList = None):
"""Network forward process.
Usually includes backbone, neck and head forward without any post-
processing.
"""
pass
@abstractmethod
def extract_feat(self, batch_inputs: Tensor):
"""Extract features from images."""
pass
def add_pred_to_datasample(self, data_samples: SampleList,
results_list: InstanceList) -> SampleList:
"""Add predictions to `DetDataSample`.
Args:
data_samples (list[:obj:`DetDataSample`], optional): A batch of
data samples that contain annotations and predictions.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances'. And the ``pred_instances`` usually
contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
for data_sample, pred_instances in zip(data_samples, results_list):
data_sample.pred_instances = pred_instances
samplelist_boxtype2tensor(data_samples)
return data_samples
| 6,255 | 38.847134 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/single_stage.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList, SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .base import BaseDetector
@MODELS.register_module()
class SingleStageDetector(BaseDetector):
"""Base class for single-stage detectors.
Single-stage detectors directly and densely predict bounding boxes on the
output features of the backbone+neck.
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = MODELS.build(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def _load_from_state_dict(self, state_dict: dict, prefix: str,
local_metadata: dict, strict: bool,
missing_keys: Union[List[str], str],
unexpected_keys: Union[List[str], str],
error_msgs: Union[List[str], str]) -> None:
"""Exchange bbox_head key to rpn_head key when loading two-stage
weights into single-stage model."""
bbox_head_prefix = prefix + '.bbox_head' if prefix else 'bbox_head'
bbox_head_keys = [
k for k in state_dict.keys() if k.startswith(bbox_head_prefix)
]
rpn_head_prefix = prefix + '.rpn_head' if prefix else 'rpn_head'
rpn_head_keys = [
k for k in state_dict.keys() if k.startswith(rpn_head_prefix)
]
if len(bbox_head_keys) == 0 and len(rpn_head_keys) != 0:
for rpn_head_key in rpn_head_keys:
bbox_head_key = bbox_head_prefix + \
rpn_head_key[len(rpn_head_prefix):]
state_dict[bbox_head_key] = state_dict.pop(rpn_head_key)
super()._load_from_state_dict(state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs)
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Union[dict, list]:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, batch_data_samples)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances'. And the ``pred_instances`` usually
contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
x = self.extract_feat(batch_inputs)
results_list = self.bbox_head.predict(
x, batch_data_samples, rescale=rescale)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
def _forward(
self,
batch_inputs: Tensor,
batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
Returns:
tuple[list]: A tuple of features from ``bbox_head`` forward.
"""
x = self.extract_feat(batch_inputs)
results = self.bbox_head.forward(x)
return results
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
Returns:
tuple[Tensor]: Multi-level features that may have
different resolutions.
"""
x = self.backbone(batch_inputs)
if self.with_neck:
x = self.neck(x)
return x
| 6,124 | 39.833333 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/gfl.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
from torch import Tensor
import torch
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class GFL(SingleStageDetector):
"""Implementation of `GFL <https://arxiv.org/abs/2006.04388>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of GFL. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of GFL. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
# for replay method by minimum cost
def compute_cost_for_memory(self, batch_inputs: Tensor,
batch_data_samples: SampleList, cur_class_num) -> Union[dict, list]:
"""Calculate cost for a batch images
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
batch_cost = self.bbox_head.compute_cost_for_memory(x, batch_data_samples, cur_class_num)
return batch_cost
def tensor2numpy(self, x):
return x.cpu().data.numpy() if x.is_cuda else x.data.numpy()
    # for replay method based on iCaRL
def compute_cost_for_memory_icarl(self, batch_inputs: Tensor,
batch_data_samples: SampleList, cur_class_num) -> Union[dict, list]:
"""Calculate cost for a batch images
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
        Returns:
            numpy.ndarray: The mean multi-level feature embedding of each
                image in the batch.
"""
x = self.extract_feat(batch_inputs)
# batch_cost = self.bbox_head.compute_cost_for_memory_icarl(x, batch_data_samples, cur_class_num)
batch_cost = torch.cat([per_x.reshape(per_x.shape[0], per_x.shape[1], -1) for per_x in x], dim=2).mean(-1)
return self.tensor2numpy(batch_cost)
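    # Shape sketch (illustrative, not part of the original method): assuming
    # five FPN levels of shape (N, 256, Hi, Wi), each level is flattened to
    # (N, 256, Hi * Wi), concatenated along the last dim and averaged, giving
    # a (N, 256) per-image embedding that is returned as a numpy array.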
# importance metric
def compute_importance_for_replay_v3(self, batch_inputs: Tensor,
batch_data_samples: SampleList, cur_class_num) -> Union[dict, list]:
"""Calculate cost for a batch images
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
        Returns:
            The importance of each image in the batch, used for replay memory
                selection.
"""
x = self.extract_feat(batch_inputs)
batch_importance = self.bbox_head.compute_importance_for_replay_v3(x, batch_data_samples, cur_class_num)
return batch_importance
def compute_cost_and_feats_for_replay_v4(self, batch_inputs: Tensor,
batch_data_samples: SampleList, cur_class_num) -> Union[dict, list]:
"""Calculate cost and feats for a batch images
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
        Returns:
            tuple: The pooled multi-level features of each image (as a
                numpy array) and the corresponding importance scores.
"""
x = self.extract_feat(batch_inputs)
batch_feats = torch.cat([per_x.reshape(per_x.shape[0], per_x.shape[1], -1) for per_x in x], dim=2).mean(-1)
batch_importances = self.bbox_head.compute_importance_for_replay_v4(x, batch_data_samples, cur_class_num)
return self.tensor2numpy(batch_feats), batch_importances
| 5,661 | 43.582677 | 115 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/detr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList
from ..layers import (DetrTransformerDecoder, DetrTransformerEncoder,
SinePositionalEncoding)
from .base_detr import DetectionTransformer
@MODELS.register_module()
class DETR(DetectionTransformer):
r"""Implementation of `DETR: End-to-End Object Detection with Transformers.
<https://arxiv.org/pdf/2005.12872>`_.
Code is modified from the `official github repo
<https://github.com/facebookresearch/detr>`_.
"""
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DetrTransformerEncoder(**self.encoder)
self.decoder = DetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
# NOTE The embed_dims is typically passed from the inside out.
# For example in DETR, The embed_dims is passed as
# self_attn -> the first encoder layer -> encoder -> detector.
self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def init_weights(self) -> None:
"""Initialize weights for Transformer and other components."""
super().init_weights()
for coder in self.encoder, self.decoder:
for p in coder.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def pre_transformer(
self,
img_feats: Tuple[Tensor],
batch_data_samples: OptSampleList = None) -> Tuple[Dict, Dict]:
"""Prepare the inputs of the Transformer.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
img_feats (Tuple[Tensor]): Tuple of features output from the neck,
has shape (bs, c, h, w).
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such as
`gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
tuple[dict, dict]: The first dict contains the inputs of encoder
and the second dict contains the inputs of decoder.
- encoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_encoder()`, which includes 'feat', 'feat_mask',
and 'feat_pos'.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'memory_mask',
and 'memory_pos'.
"""
feat = img_feats[-1] # NOTE img_feats contains only one feature.
batch_size, feat_dim, _, _ = feat.shape
        # construct binary masks for the transformer.
assert batch_data_samples is not None
batch_input_shape = batch_data_samples[0].batch_input_shape
img_shape_list = [sample.img_shape for sample in batch_data_samples]
input_img_h, input_img_w = batch_input_shape
masks = feat.new_ones((batch_size, input_img_h, input_img_w))
for img_id in range(batch_size):
img_h, img_w = img_shape_list[img_id]
masks[img_id, :img_h, :img_w] = 0
# NOTE following the official DETR repo, non-zero values represent
# ignored positions, while zero values mean valid positions.
masks = F.interpolate(
masks.unsqueeze(1), size=feat.shape[-2:]).to(torch.bool).squeeze(1)
# [batch_size, embed_dim, h, w]
pos_embed = self.positional_encoding(masks)
# use `view` instead of `flatten` for dynamically exporting to ONNX
# [bs, c, h, w] -> [bs, h*w, c]
feat = feat.view(batch_size, feat_dim, -1).permute(0, 2, 1)
pos_embed = pos_embed.view(batch_size, feat_dim, -1).permute(0, 2, 1)
# [bs, h, w] -> [bs, h*w]
masks = masks.view(batch_size, -1)
# prepare transformer_inputs_dict
encoder_inputs_dict = dict(
feat=feat, feat_mask=masks, feat_pos=pos_embed)
decoder_inputs_dict = dict(memory_mask=masks, memory_pos=pos_embed)
return encoder_inputs_dict, decoder_inputs_dict
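    # Shape sketch (illustrative, not part of the original class): with a
    # batch of 2 images and a single C5 feature map of shape (2, 256, 25, 38),
    # 'feat' and 'feat_pos' are flattened to (2, 950, 256) and 'feat_mask'
    # to (2, 950), where 950 = 25 * 38 is the number of feature points.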
def forward_encoder(self, feat: Tensor, feat_mask: Tensor,
feat_pos: Tensor) -> Dict:
"""Forward with Transformer encoder.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
feat (Tensor): Sequential features, has shape (bs, num_feat_points,
dim).
feat_mask (Tensor): ByteTensor, the padding mask of the features,
has shape (bs, num_feat_points).
feat_pos (Tensor): The positional embeddings of the features, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of encoder outputs, which includes the
`memory` of the encoder output.
"""
memory = self.encoder(
query=feat, query_pos=feat_pos,
key_padding_mask=feat_mask) # for self_attn
encoder_outputs_dict = dict(memory=memory)
return encoder_outputs_dict
def pre_decoder(self, memory: Tensor) -> Tuple[Dict, Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
Returns:
tuple[dict, dict]: The first dict contains the inputs of decoder
and the second dict contains the inputs of the bbox_head function.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'query', 'query_pos',
'memory'.
- head_inputs_dict (dict): The keyword args dictionary of the
bbox_head functions, which is usually empty, or includes
            `enc_outputs_class` and `enc_outputs_coord` when the detector
            supports 'two stage' or 'query selection' strategies.
"""
batch_size = memory.size(0) # (bs, num_feat_points, dim)
query_pos = self.query_embedding.weight
# (num_queries, dim) -> (bs, num_queries, dim)
query_pos = query_pos.unsqueeze(0).repeat(batch_size, 1, 1)
query = torch.zeros_like(query_pos)
decoder_inputs_dict = dict(
query_pos=query_pos, query=query, memory=memory)
head_inputs_dict = dict()
return decoder_inputs_dict, head_inputs_dict
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` of the decoder output.
- hidden_states (Tensor): Has shape
(num_decoder_layers, bs, num_queries, dim)
"""
hidden_states = self.decoder(
query=query,
key=memory,
value=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask) # for cross_attn
head_inputs_dict = dict(hidden_states=hidden_states)
return head_inputs_dict
| 9,383 | 42.24424 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/yolo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOV3(SingleStageDetector):
r"""Implementation of `Yolov3: An incremental improvement
<https://arxiv.org/abs/1804.02767>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of YOLOV3. Default: None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of YOLOV3. Default: None.
data_preprocessor (:obj:`ConfigDict` or dict, optional):
Model preprocessing config for processing the input data.
it usually includes ``to_rgb``, ``pad_size_divisor``,
``pad_value``, ``mean`` and ``std``. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,876 | 39.804348 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/reppoints_detector.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RepPointsDetector(SingleStageDetector):
"""RepPoints: Point Set Representation for Object Detection.
This detector is the implementation of:
- RepPoints detector (https://arxiv.org/pdf/1904.11490)
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,057 | 33.129032 | 65 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/scnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .cascade_rcnn import CascadeRCNN
@MODELS.register_module()
class SCNet(CascadeRCNN):
"""Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
| 324 | 26.083333 | 71 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/ddod.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DDOD(SingleStageDetector):
"""Implementation of `DDOD <https://arxiv.org/pdf/2107.02963.pdf>`_.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of DDOD. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of DDOD. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,684 | 39.119048 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/semi_base.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
from mmdet.models.utils import (filter_gt_instances, rename_loss_dict,
reweight_loss_dict)
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox_project
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .base import BaseDetector
@MODELS.register_module()
class SemiBaseDetector(BaseDetector):
"""Base class for semi-supervised detectors.
    Semi-supervised detectors typically consist of a teacher model
    updated by exponential moving average and a student model updated
    by gradient descent.
Args:
detector (:obj:`ConfigDict` or dict): The detector config.
semi_train_cfg (:obj:`ConfigDict` or dict, optional):
The semi-supervised training config.
semi_test_cfg (:obj:`ConfigDict` or dict, optional):
The semi-supervised testing config.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
detector: ConfigType,
semi_train_cfg: OptConfigType = None,
semi_test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.student = MODELS.build(detector)
self.teacher = MODELS.build(detector)
self.semi_train_cfg = semi_train_cfg
self.semi_test_cfg = semi_test_cfg
if self.semi_train_cfg.get('freeze_teacher', True) is True:
self.freeze(self.teacher)
@staticmethod
def freeze(model: nn.Module):
"""Freeze the model."""
model.eval()
for param in model.parameters():
param.requires_grad = False
def loss(self, multi_batch_inputs: Dict[str, Tensor],
multi_batch_data_samples: Dict[str, SampleList]) -> dict:
"""Calculate losses from multi-branch inputs and data samples.
Args:
multi_batch_inputs (Dict[str, Tensor]): The dict of multi-branch
input images, each value with shape (N, C, H, W).
Each value should usually be mean centered and std scaled.
multi_batch_data_samples (Dict[str, List[:obj:`DetDataSample`]]):
The dict of multi-branch data samples.
Returns:
dict: A dictionary of loss components
"""
losses = dict()
losses.update(**self.loss_by_gt_instances(
multi_batch_inputs['sup'], multi_batch_data_samples['sup']))
origin_pseudo_data_samples, batch_info = self.get_pseudo_instances(
multi_batch_inputs['unsup_teacher'],
multi_batch_data_samples['unsup_teacher'])
multi_batch_data_samples[
'unsup_student'] = self.project_pseudo_instances(
origin_pseudo_data_samples,
multi_batch_data_samples['unsup_student'])
losses.update(**self.loss_by_pseudo_instances(
multi_batch_inputs['unsup_student'],
multi_batch_data_samples['unsup_student'], batch_info))
return losses
def loss_by_gt_instances(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""Calculate losses from a batch of inputs and ground-truth data
samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components
"""
losses = self.student.loss(batch_inputs, batch_data_samples)
sup_weight = self.semi_train_cfg.get('sup_weight', 1.)
return rename_loss_dict('sup_', reweight_loss_dict(losses, sup_weight))
def loss_by_pseudo_instances(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
batch_info: Optional[dict] = None) -> dict:
"""Calculate losses from a batch of inputs and pseudo data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
batch_info (dict): Batch information of teacher model
forward propagation process. Defaults to None.
Returns:
dict: A dictionary of loss components
"""
batch_data_samples = filter_gt_instances(
batch_data_samples, score_thr=self.semi_train_cfg.cls_pseudo_thr)
losses = self.student.loss(batch_inputs, batch_data_samples)
pseudo_instances_num = sum([
len(data_samples.gt_instances)
for data_samples in batch_data_samples
])
unsup_weight = self.semi_train_cfg.get(
'unsup_weight', 1.) if pseudo_instances_num > 0 else 0.
return rename_loss_dict('unsup_',
reweight_loss_dict(losses, unsup_weight))
@torch.no_grad()
def get_pseudo_instances(
self, batch_inputs: Tensor, batch_data_samples: SampleList
) -> Tuple[SampleList, Optional[dict]]:
"""Get pseudo instances from teacher model."""
self.teacher.eval()
results_list = self.teacher.predict(
batch_inputs, batch_data_samples, rescale=False)
batch_info = {}
for data_samples, results in zip(batch_data_samples, results_list):
data_samples.gt_instances = results.pred_instances
data_samples.gt_instances.bboxes = bbox_project(
data_samples.gt_instances.bboxes,
torch.from_numpy(data_samples.homography_matrix).inverse().to(
self.data_preprocessor.device), data_samples.ori_shape)
return batch_data_samples, batch_info
def project_pseudo_instances(self, batch_pseudo_instances: SampleList,
batch_data_samples: SampleList) -> SampleList:
"""Project pseudo instances."""
for pseudo_instances, data_samples in zip(batch_pseudo_instances,
batch_data_samples):
data_samples.gt_instances = copy.deepcopy(
pseudo_instances.gt_instances)
data_samples.gt_instances.bboxes = bbox_project(
data_samples.gt_instances.bboxes,
torch.tensor(data_samples.homography_matrix).to(
self.data_preprocessor.device), data_samples.img_shape)
wh_thr = self.semi_train_cfg.get('min_pseudo_bbox_wh', (1e-2, 1e-2))
return filter_gt_instances(batch_data_samples, wh_thr=wh_thr)
def predict(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
list[:obj:`DetDataSample`]: Return the detection results of the
input images. The returns value is DetDataSample,
which usually contain 'pred_instances'. And the
``pred_instances`` usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
if self.semi_test_cfg.get('predict_on', 'teacher') == 'teacher':
return self.teacher(
batch_inputs, batch_data_samples, mode='predict')
else:
return self.student(
batch_inputs, batch_data_samples, mode='predict')
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> SampleList:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
Returns:
            tuple or Tensor: The raw outputs of the selected (teacher or
                student) model's forward.
"""
if self.semi_test_cfg.get('forward_on', 'teacher') == 'teacher':
return self.teacher(
batch_inputs, batch_data_samples, mode='tensor')
else:
return self.student(
batch_inputs, batch_data_samples, mode='tensor')
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
Returns:
tuple[Tensor]: Multi-level features that may have
different resolutions.
"""
if self.semi_test_cfg.get('extract_feat_on', 'teacher') == 'teacher':
return self.teacher.extract_feat(batch_inputs)
else:
return self.student.extract_feat(batch_inputs)
def _load_from_state_dict(self, state_dict: dict, prefix: str,
local_metadata: dict, strict: bool,
missing_keys: Union[List[str], str],
unexpected_keys: Union[List[str], str],
error_msgs: Union[List[str], str]) -> None:
"""Add teacher and student prefixes to model parameter names."""
if not any([
'student' in key or 'teacher' in key
for key in state_dict.keys()
]):
keys = list(state_dict.keys())
state_dict.update({'teacher.' + k: state_dict[k] for k in keys})
state_dict.update({'student.' + k: state_dict[k] for k in keys})
for k in keys:
state_dict.pop(k)
return super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
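    # Illustrative example (not part of the original class): when a plain
    # fully-supervised checkpoint is loaded, every key is duplicated under
    # both sub-models, e.g. a hypothetical key
    #   'backbone.conv1.weight'
    # becomes 'teacher.backbone.conv1.weight' and
    # 'student.backbone.conv1.weight'.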
| 11,647 | 42.625468 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/kd_one_stage.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType
from .single_stage import SingleStageDetector
@MODELS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
teacher_config (:obj:`ConfigDict` | dict | str | Path): Config file
path or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
            Defaults to None.
eval_teacher (bool): Set the train mode for teacher.
Defaults to True.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of the detector. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of the detector. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
"""
def __init__(
self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_config: Union[ConfigType, str, Path],
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, (str, Path)):
teacher_config = Config.fromfile(teacher_config)
self.teacher_model = MODELS.build(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(batch_inputs)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.loss(x, out_teacher, batch_data_samples)
return losses
def cuda(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to cuda when calling ``cuda`` function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def to(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to other device when calling ``to``
function."""
self.teacher_model.to(device=device)
return super().to(device=device)
def train(self, mode: bool = True) -> None:
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name: str, value: Any) -> None:
"""Set attribute, i.e. self.name = value
        This override prevents the teacher model from being registered as an
        nn.Module. The teacher module is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
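# A minimal sketch (toy modules with hypothetical names) of why `__setattr__`
# above stores the teacher with `object.__setattr__`: attributes assigned the
# normal way are registered as sub-modules by `nn.Module`, so their parameters
# would show up in `self.parameters()` and be updated by the optimizer;
# bypassing the registration keeps the frozen teacher out of the student's
# parameter list.
class _ToyStudent(nn.Module):
    def __init__(self, teacher: nn.Module) -> None:
        super().__init__()
        self.head = nn.Linear(4, 2)
        # plain-object assignment, mirroring the detector above
        object.__setattr__(self, 'teacher_model', teacher)
_toy_student = _ToyStudent(nn.Linear(4, 2))
# only the student head (4 * 2 weights + 2 biases = 10) is counted; the
# teacher's parameters stay invisible to the optimizer
assert sum(p.numel() for p in _toy_student.parameters()) == 10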
| 4,987 | 39.552846 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/fast_rcnn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class FastRCNN(TwoStageDetector):
"""Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_"""
def __init__(self,
backbone: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
data_preprocessor=data_preprocessor)
| 925 | 33.296296 | 76 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/autoassign.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label Assignment for Dense
Object Detection <https://arxiv.org/abs/2007.03496>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of AutoAssign. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of AutoAssign. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,798 | 39.886364 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/cascade_rcnn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,090 | 35.366667 | 74 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/deformable_detr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Dict, Tuple
import torch
import torch.nn.functional as F
from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention
from mmengine.model import xavier_init
from torch import Tensor, nn
from torch.nn.init import normal_
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList
from mmdet.utils import OptConfigType
from ..layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerEncoder, SinePositionalEncoding)
from .base_detr import DetectionTransformer
@MODELS.register_module()
class DeformableDETR(DetectionTransformer):
r"""Implementation of `Deformable DETR: Deformable Transformers for
End-to-End Object Detection <https://arxiv.org/abs/2010.04159>`_
Code is modified from the `official github repo
<https://github.com/fundamentalvision/Deformable-DETR>`_.
Args:
decoder (:obj:`ConfigDict` or dict, optional): Config of the
Transformer decoder. Defaults to None.
bbox_head (:obj:`ConfigDict` or dict, optional): Config for the
bounding box head module. Defaults to None.
with_box_refine (bool, optional): Whether to refine the references
in the decoder. Defaults to `False`.
as_two_stage (bool, optional): Whether to generate the proposal
from the outputs of encoder. Defaults to `False`.
num_feature_levels (int, optional): Number of feature levels.
Defaults to 4.
"""
def __init__(self,
*args,
decoder: OptConfigType = None,
bbox_head: OptConfigType = None,
with_box_refine: bool = False,
as_two_stage: bool = False,
num_feature_levels: int = 4,
**kwargs) -> None:
self.with_box_refine = with_box_refine
self.as_two_stage = as_two_stage
self.num_feature_levels = num_feature_levels
if bbox_head is not None:
assert 'share_pred_layer' not in bbox_head and \
'num_pred_layer' not in bbox_head and \
'as_two_stage' not in bbox_head, \
                'The three keyword args `share_pred_layer`, `num_pred_layer`, ' \
                'and `as_two_stage` are set in `detector.__init__()`, users ' \
'should not set them in `bbox_head` config.'
# The last prediction layer is used to generate proposal
# from encode feature map when `as_two_stage` is `True`.
# And all the prediction layers should share parameters
# when `with_box_refine` is `True`.
bbox_head['share_pred_layer'] = not with_box_refine
bbox_head['num_pred_layer'] = (decoder['num_layers'] + 1) \
if self.as_two_stage else decoder['num_layers']
bbox_head['as_two_stage'] = as_two_stage
super().__init__(*args, decoder=decoder, bbox_head=bbox_head, **kwargs)
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DeformableDetrTransformerEncoder(**self.encoder)
self.decoder = DeformableDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
if not self.as_two_stage:
self.query_embedding = nn.Embedding(self.num_queries,
self.embed_dims * 2)
# NOTE The query_embedding will be split into query and query_pos
# in self.pre_decoder, hence, the embed_dims are doubled.
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
self.level_embed = nn.Parameter(
torch.Tensor(self.num_feature_levels, self.embed_dims))
if self.as_two_stage:
self.memory_trans_fc = nn.Linear(self.embed_dims, self.embed_dims)
self.memory_trans_norm = nn.LayerNorm(self.embed_dims)
self.pos_trans_fc = nn.Linear(self.embed_dims * 2,
self.embed_dims * 2)
self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2)
else:
self.reference_points_fc = nn.Linear(self.embed_dims, 2)
def init_weights(self) -> None:
"""Initialize weights for Transformer and other components."""
super().init_weights()
for coder in self.encoder, self.decoder:
for p in coder.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MultiScaleDeformableAttention):
m.init_weights()
if self.as_two_stage:
nn.init.xavier_uniform_(self.memory_trans_fc.weight)
nn.init.xavier_uniform_(self.pos_trans_fc.weight)
else:
xavier_init(
self.reference_points_fc, distribution='uniform', bias=0.)
normal_(self.level_embed)
def pre_transformer(
self,
mlvl_feats: Tuple[Tensor],
batch_data_samples: OptSampleList = None) -> Tuple[Dict]:
"""Process image features before feeding them to the transformer.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
mlvl_feats (tuple[Tensor]): Multi-level features that may have
different resolutions, output from neck. Each feature has
shape (bs, dim, h_lvl, w_lvl), where 'lvl' means 'layer'.
batch_data_samples (list[:obj:`DetDataSample`], optional): The
batch data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
tuple[dict]: The first dict contains the inputs of encoder and the
second dict contains the inputs of decoder.
- encoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_encoder()`, which includes 'feat', 'feat_mask',
and 'feat_pos'.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'memory_mask'.
"""
batch_size = mlvl_feats[0].size(0)
# construct binary masks for the transformer.
assert batch_data_samples is not None
batch_input_shape = batch_data_samples[0].batch_input_shape
img_shape_list = [sample.img_shape for sample in batch_data_samples]
input_img_h, input_img_w = batch_input_shape
masks = mlvl_feats[0].new_ones((batch_size, input_img_h, input_img_w))
for img_id in range(batch_size):
img_h, img_w = img_shape_list[img_id]
masks[img_id, :img_h, :img_w] = 0
        # NOTE following the official DETR repo, non-zero values represent
        # ignored positions, while zero values mean valid positions.
mlvl_masks = []
mlvl_pos_embeds = []
for feat in mlvl_feats:
mlvl_masks.append(
F.interpolate(masks[None],
size=feat.shape[-2:]).to(torch.bool).squeeze(0))
mlvl_pos_embeds.append(self.positional_encoding(mlvl_masks[-1]))
feat_flatten = []
lvl_pos_embed_flatten = []
mask_flatten = []
spatial_shapes = []
for lvl, (feat, mask, pos_embed) in enumerate(
zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)):
batch_size, c, h, w = feat.shape
# [bs, c, h_lvl, w_lvl] -> [bs, h_lvl*w_lvl, c]
feat = feat.view(batch_size, c, -1).permute(0, 2, 1)
pos_embed = pos_embed.view(batch_size, c, -1).permute(0, 2, 1)
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
# [bs, h_lvl, w_lvl] -> [bs, h_lvl*w_lvl]
mask = mask.flatten(1)
spatial_shape = (h, w)
feat_flatten.append(feat)
lvl_pos_embed_flatten.append(lvl_pos_embed)
mask_flatten.append(mask)
spatial_shapes.append(spatial_shape)
# (bs, num_feat_points, dim)
feat_flatten = torch.cat(feat_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
# (bs, num_feat_points), where num_feat_points = sum_lvl(h_lvl*w_lvl)
mask_flatten = torch.cat(mask_flatten, 1)
spatial_shapes = torch.as_tensor( # (num_level, 2)
spatial_shapes,
dtype=torch.long,
device=feat_flatten.device)
level_start_index = torch.cat((
spatial_shapes.new_zeros((1, )), # (num_level)
spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack( # (bs, num_level, 2)
[self.get_valid_ratio(m) for m in mlvl_masks], 1)
encoder_inputs_dict = dict(
feat=feat_flatten,
feat_mask=mask_flatten,
feat_pos=lvl_pos_embed_flatten,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios)
decoder_inputs_dict = dict(
memory_mask=mask_flatten,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios)
return encoder_inputs_dict, decoder_inputs_dict
def forward_encoder(self, feat: Tensor, feat_mask: Tensor,
feat_pos: Tensor, spatial_shapes: Tensor,
level_start_index: Tensor,
valid_ratios: Tensor) -> Dict:
"""Forward with Transformer encoder.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
feat (Tensor): Sequential features, has shape (bs, num_feat_points,
dim).
feat_mask (Tensor): ByteTensor, the padding mask of the features,
has shape (bs, num_feat_points).
feat_pos (Tensor): The positional embeddings of the features, has
shape (bs, num_feat_points, dim).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
level_start_index (Tensor): The start index of each level.
A tensor has shape (num_levels, ) and can be represented
as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
Returns:
dict: The dictionary of encoder outputs, which includes the
`memory` of the encoder output.
"""
memory = self.encoder(
query=feat,
query_pos=feat_pos,
key_padding_mask=feat_mask, # for self_attn
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios)
encoder_outputs_dict = dict(
memory=memory,
memory_mask=feat_mask,
spatial_shapes=spatial_shapes)
return encoder_outputs_dict
def pre_decoder(self, memory: Tensor, memory_mask: Tensor,
spatial_shapes: Tensor) -> Tuple[Dict, Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`, and `reference_points`.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points). It will only be used when
`as_two_stage` is `True`.
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
It will only be used when `as_two_stage` is `True`.
Returns:
tuple[dict, dict]: The decoder_inputs_dict and head_inputs_dict.
- decoder_inputs_dict (dict): The keyword dictionary args of
`self.forward_decoder()`, which includes 'query', 'query_pos',
'memory', and `reference_points`. The reference_points of
decoder input here are 4D boxes when `as_two_stage` is `True`,
otherwise 2D points, although it has `points` in its name.
The reference_points in encoder is always 2D points.
- head_inputs_dict (dict): The keyword dictionary args of the
bbox_head functions, which includes `enc_outputs_class` and
`enc_outputs_coord`. They are both `None` when 'as_two_stage'
is `False`. The dict is empty when `self.training` is `False`.
"""
batch_size, _, c = memory.shape
if self.as_two_stage:
output_memory, output_proposals = \
self.gen_encoder_output_proposals(
memory, memory_mask, spatial_shapes)
enc_outputs_class = self.bbox_head.cls_branches[
self.decoder.num_layers](
output_memory)
enc_outputs_coord_unact = self.bbox_head.reg_branches[
self.decoder.num_layers](output_memory) + output_proposals
enc_outputs_coord = enc_outputs_coord_unact.sigmoid()
# We only use the first channel in enc_outputs_class as foreground,
# the other (num_classes - 1) channels are actually not used.
# Its targets are set to be 0s, which indicates the first
# class (foreground) because we use [0, num_classes - 1] to
# indicate class labels, background class is indicated by
# num_classes (similar convention in RPN).
# See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/deformable_detr_head.py#L241 # noqa
# This follows the official implementation of Deformable DETR.
topk_proposals = torch.topk(
enc_outputs_class[..., 0], self.num_queries, dim=1)[1]
topk_coords_unact = torch.gather(
enc_outputs_coord_unact, 1,
topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
topk_coords_unact = topk_coords_unact.detach()
reference_points = topk_coords_unact.sigmoid()
pos_trans_out = self.pos_trans_fc(
self.get_proposal_pos_embed(topk_coords_unact))
pos_trans_out = self.pos_trans_norm(pos_trans_out)
query_pos, query = torch.split(pos_trans_out, c, dim=2)
else:
enc_outputs_class, enc_outputs_coord = None, None
query_embed = self.query_embedding.weight
query_pos, query = torch.split(query_embed, c, dim=1)
query_pos = query_pos.unsqueeze(0).expand(batch_size, -1, -1)
query = query.unsqueeze(0).expand(batch_size, -1, -1)
reference_points = self.reference_points_fc(query_pos).sigmoid()
decoder_inputs_dict = dict(
query=query,
query_pos=query_pos,
memory=memory,
reference_points=reference_points)
head_inputs_dict = dict(
enc_outputs_class=enc_outputs_class,
enc_outputs_coord=enc_outputs_coord) if self.training else dict()
return decoder_inputs_dict, head_inputs_dict
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, reference_points: Tensor,
spatial_shapes: Tensor, level_start_index: Tensor,
valid_ratios: Tensor) -> Dict:
"""Forward with Transformer decoder.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
reference_points (Tensor): The initial reference, has shape
(bs, num_queries, 4) with the last dimension arranged as
(cx, cy, w, h) when `as_two_stage` is `True`, otherwise has
shape (bs, num_queries, 2) with the last dimension arranged as
(cx, cy).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
level_start_index (Tensor): The start index of each level.
A tensor has shape (num_levels, ) and can be represented
as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` of the decoder output and `references` including
the initial and intermediate reference_points.
"""
inter_states, inter_references = self.decoder(
query=query,
value=memory,
query_pos=query_pos,
key_padding_mask=memory_mask, # for cross_attn
reference_points=reference_points,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
reg_branches=self.bbox_head.reg_branches
if self.with_box_refine else None)
references = [reference_points, *inter_references]
decoder_outputs_dict = dict(
hidden_states=inter_states, references=references)
return decoder_outputs_dict
@staticmethod
def get_valid_ratio(mask: Tensor) -> Tensor:
"""Get the valid radios of feature map in a level.
.. code:: text
|---> valid_W <---|
---+-----------------+-----+---
A | | | A
| | | | |
| | | | |
valid_H | | | |
| | | | H
| | | | |
V | | | |
---+-----------------+ | |
| | V
+-----------------------+---
|---------> W <---------|
The valid_ratios are defined as:
r_h = valid_H / H, r_w = valid_W / W
They are the factors to re-normalize the relative coordinates of the
image to the relative coordinates of the current level feature map.
Args:
mask (Tensor): Binary mask of a feature map, has shape (bs, H, W).
Returns:
            Tensor: valid ratios [r_w, r_h] of a feature map, has shape (bs, 2).
"""
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def gen_encoder_output_proposals(
self, memory: Tensor, memory_mask: Tensor,
spatial_shapes: Tensor) -> Tuple[Tensor, Tensor]:
"""Generate proposals from encoded memory. The function will only be
used when `as_two_stage` is `True`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
Returns:
tuple: A tuple of transformed memory and proposals.
- output_memory (Tensor): The transformed memory for obtaining
top-k proposals, has shape (bs, num_feat_points, dim).
- output_proposals (Tensor): The inverse-normalized proposal, has
shape (batch_size, num_keys, 4) with the last dimension arranged
as (cx, cy, w, h).
"""
bs = memory.size(0)
proposals = []
_cur = 0 # start index in the sequence of the current level
for lvl, (H, W) in enumerate(spatial_shapes):
mask_flatten_ = memory_mask[:,
_cur:(_cur + H * W)].view(bs, H, W, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1).unsqueeze(-1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1).unsqueeze(-1)
grid_y, grid_x = torch.meshgrid(
torch.linspace(
0, H - 1, H, dtype=torch.float32, device=memory.device),
torch.linspace(
0, W - 1, W, dtype=torch.float32, device=memory.device))
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_W, valid_H], 1).view(bs, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(bs, -1, -1, -1) + 0.5) / scale
wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
proposal = torch.cat((grid, wh), -1).view(bs, -1, 4)
proposals.append(proposal)
_cur += (H * W)
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) &
(output_proposals < 0.99)).all(
-1, keepdim=True)
# inverse_sigmoid
output_proposals = torch.log(output_proposals / (1 - output_proposals))
output_proposals = output_proposals.masked_fill(
memory_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(
~output_proposals_valid, float('inf'))
output_memory = memory
output_memory = output_memory.masked_fill(
memory_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid,
float(0))
output_memory = self.memory_trans_fc(output_memory)
output_memory = self.memory_trans_norm(output_memory)
        # output_memory: (bs, sum(hw), dim); output_proposals: (bs, sum(hw), 4)
return output_memory, output_proposals
@staticmethod
def get_proposal_pos_embed(proposals: Tensor,
num_pos_feats: int = 128,
temperature: int = 10000) -> Tensor:
"""Get the position embedding of the proposal.
Args:
proposals (Tensor): Not normalized proposals, has shape
(bs, num_queries, 4) with the last dimension arranged as
(cx, cy, w, h).
            num_pos_feats (int, optional): The feature dimension for each
                position along the x, y, w, and h axes. Note the final
                returned dimension for each position is 4 times num_pos_feats.
                Defaults to 128.
temperature (int, optional): The temperature used for scaling the
position embedding. Defaults to 10000.
Returns:
Tensor: The position embedding of proposal, has shape
(bs, num_queries, num_pos_feats * 4), with the last dimension
arranged as (cx, cy, w, h)
"""
scale = 2 * math.pi
dim_t = torch.arange(
num_pos_feats, dtype=torch.float32, device=proposals.device)
dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats)
# N, L, 4
proposals = proposals.sigmoid() * scale
# N, L, 4, 128
pos = proposals[:, :, :, None] / dim_t
# N, L, 4, 64, 2
pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()),
dim=4).flatten(2)
return pos
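# A small runnable sketch (toy tensors, standalone re-implementation) of the
# valid-ratio computation in `get_valid_ratio` above: given a padding mask
# where non-zero marks padded pixels, the ratios are the fractions of the
# height and width that contain real image content.
def _toy_valid_ratio(mask: Tensor) -> Tensor:
    _, h, w = mask.shape
    valid_h = torch.sum(~mask[:, :, 0], 1)
    valid_w = torch.sum(~mask[:, 0, :], 1)
    return torch.stack([valid_w.float() / w, valid_h.float() / h], -1)
# a 1x4x8 mask whose right half and bottom row are padding
_toy_mask = torch.ones(1, 4, 8, dtype=torch.bool)
_toy_mask[:, :3, :4] = False
# expected ratios: r_w = 4 / 8 = 0.5, r_h = 3 / 4 = 0.75
assert torch.allclose(_toy_valid_ratio(_toy_mask), torch.tensor([[0.5, 0.75]]))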
| 25,779 | 46.564576 | 132 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/nasfcos.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class NASFCOS(SingleStageDetector):
"""Implementation of `NAS-FCOS: Fast Neural Architecture Search for Object
    Detection. <https://arxiv.org/abs/1906.04423>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of NASFCOS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of NASFCOS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,789 | 39.681818 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/mask_rcnn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone: ConfigDict,
rpn_head: ConfigDict,
roi_head: ConfigDict,
train_cfg: ConfigDict,
test_cfg: ConfigDict,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
data_preprocessor=data_preprocessor)
| 1,023 | 32.032258 | 76 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/soft_teacher.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import List, Optional, Tuple
import torch
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.utils import (filter_gt_instances, rename_loss_dict,
reweight_loss_dict)
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi, bbox_project
from mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig
from ..utils.misc import unpack_gt_instances
from .semi_base import SemiBaseDetector
@MODELS.register_module()
class SoftTeacher(SemiBaseDetector):
r"""Implementation of `End-to-End Semi-Supervised Object Detection
with Soft Teacher <https://arxiv.org/abs/2106.09018>`_
Args:
detector (:obj:`ConfigDict` or dict): The detector config.
semi_train_cfg (:obj:`ConfigDict` or dict, optional):
The semi-supervised training config.
semi_test_cfg (:obj:`ConfigDict` or dict, optional):
The semi-supervised testing config.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
detector: ConfigType,
semi_train_cfg: OptConfigType = None,
semi_test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
detector=detector,
semi_train_cfg=semi_train_cfg,
semi_test_cfg=semi_test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
def loss_by_pseudo_instances(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
batch_info: Optional[dict] = None) -> dict:
"""Calculate losses from a batch of inputs and pseudo data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
batch_info (dict): Batch information of teacher model
forward propagation process. Defaults to None.
Returns:
dict: A dictionary of loss components
"""
x = self.student.extract_feat(batch_inputs)
losses = {}
rpn_losses, rpn_results_list = self.rpn_loss_by_pseudo_instances(
x, batch_data_samples)
losses.update(**rpn_losses)
losses.update(**self.rcnn_cls_loss_by_pseudo_instances(
x, rpn_results_list, batch_data_samples, batch_info))
losses.update(**self.rcnn_reg_loss_by_pseudo_instances(
x, rpn_results_list, batch_data_samples))
unsup_weight = self.semi_train_cfg.get('unsup_weight', 1.)
return rename_loss_dict('unsup_',
reweight_loss_dict(losses, unsup_weight))
@torch.no_grad()
def get_pseudo_instances(
self, batch_inputs: Tensor, batch_data_samples: SampleList
) -> Tuple[SampleList, Optional[dict]]:
"""Get pseudo instances from teacher model."""
assert self.teacher.with_bbox, 'Bbox head must be implemented.'
x = self.teacher.extract_feat(batch_inputs)
# If there are no pre-defined proposals, use RPN to get proposals
if batch_data_samples[0].get('proposals', None) is None:
rpn_results_list = self.teacher.rpn_head.predict(
x, batch_data_samples, rescale=False)
else:
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
results_list = self.teacher.roi_head.predict(
x, rpn_results_list, batch_data_samples, rescale=False)
for data_samples, results in zip(batch_data_samples, results_list):
data_samples.gt_instances = results
batch_data_samples = filter_gt_instances(
batch_data_samples,
score_thr=self.semi_train_cfg.pseudo_label_initial_score_thr)
reg_uncs_list = self.compute_uncertainty_with_aug(
x, batch_data_samples)
for data_samples, reg_uncs in zip(batch_data_samples, reg_uncs_list):
data_samples.gt_instances['reg_uncs'] = reg_uncs
data_samples.gt_instances.bboxes = bbox_project(
data_samples.gt_instances.bboxes,
torch.from_numpy(data_samples.homography_matrix).inverse().to(
self.data_preprocessor.device), data_samples.ori_shape)
batch_info = {
'feat': x,
'img_shape': [],
'homography_matrix': [],
'metainfo': []
}
for data_samples in batch_data_samples:
batch_info['img_shape'].append(data_samples.img_shape)
batch_info['homography_matrix'].append(
torch.from_numpy(data_samples.homography_matrix).to(
self.data_preprocessor.device))
batch_info['metainfo'].append(data_samples.metainfo)
return batch_data_samples, batch_info
def rpn_loss_by_pseudo_instances(self, x: Tuple[Tensor],
batch_data_samples: SampleList) -> dict:
"""Calculate rpn loss from a batch of inputs and pseudo data samples.
Args:
x (tuple[Tensor]): Features from FPN.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
Returns:
dict: A dictionary of rpn loss components
"""
rpn_data_samples = copy.deepcopy(batch_data_samples)
rpn_data_samples = filter_gt_instances(
rpn_data_samples, score_thr=self.semi_train_cfg.rpn_pseudo_thr)
proposal_cfg = self.student.train_cfg.get('rpn_proposal',
self.student.test_cfg.rpn)
# set cat_id of gt_labels to 0 in RPN
for data_sample in rpn_data_samples:
data_sample.gt_instances.labels = \
torch.zeros_like(data_sample.gt_instances.labels)
rpn_losses, rpn_results_list = self.student.rpn_head.loss_and_predict(
x, rpn_data_samples, proposal_cfg=proposal_cfg)
for key in rpn_losses.keys():
if 'loss' in key and 'rpn' not in key:
rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)
return rpn_losses, rpn_results_list
def rcnn_cls_loss_by_pseudo_instances(self, x: Tuple[Tensor],
unsup_rpn_results_list: InstanceList,
batch_data_samples: SampleList,
batch_info: dict) -> dict:
"""Calculate classification loss from a batch of inputs and pseudo data
samples.
Args:
x (tuple[Tensor]): List of multi-level img features.
unsup_rpn_results_list (list[:obj:`InstanceData`]):
List of region proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
batch_info (dict): Batch information of teacher model
forward propagation process.
Returns:
dict[str, Tensor]: A dictionary of rcnn
classification loss components
"""
rpn_results_list = copy.deepcopy(unsup_rpn_results_list)
cls_data_samples = copy.deepcopy(batch_data_samples)
cls_data_samples = filter_gt_instances(
cls_data_samples, score_thr=self.semi_train_cfg.cls_pseudo_thr)
outputs = unpack_gt_instances(cls_data_samples)
batch_gt_instances, batch_gt_instances_ignore, _ = outputs
# assign gts and sample proposals
num_imgs = len(cls_data_samples)
sampling_results = []
for i in range(num_imgs):
# rename rpn_results.bboxes to rpn_results.priors
rpn_results = rpn_results_list[i]
rpn_results.priors = rpn_results.pop('bboxes')
assign_result = self.student.roi_head.bbox_assigner.assign(
rpn_results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = self.student.roi_head.bbox_sampler.sample(
assign_result,
rpn_results,
batch_gt_instances[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
selected_bboxes = [res.priors for res in sampling_results]
rois = bbox2roi(selected_bboxes)
bbox_results = self.student.roi_head._bbox_forward(x, rois)
# cls_reg_targets is a tuple of labels, label_weights,
# and bbox_targets, bbox_weights
cls_reg_targets = self.student.roi_head.bbox_head.get_targets(
sampling_results, self.student.train_cfg.rcnn)
selected_results_list = []
for bboxes, data_samples, teacher_matrix, teacher_img_shape in zip(
selected_bboxes, batch_data_samples,
batch_info['homography_matrix'], batch_info['img_shape']):
student_matrix = torch.tensor(
data_samples.homography_matrix, device=teacher_matrix.device)
homography_matrix = teacher_matrix @ student_matrix.inverse()
projected_bboxes = bbox_project(bboxes, homography_matrix,
teacher_img_shape)
selected_results_list.append(InstanceData(bboxes=projected_bboxes))
with torch.no_grad():
results_list = self.teacher.roi_head.predict_bbox(
batch_info['feat'],
batch_info['metainfo'],
selected_results_list,
rcnn_test_cfg=None,
rescale=False)
bg_score = torch.cat(
[results.scores[:, -1] for results in results_list])
# cls_reg_targets[0] is labels
neg_inds = cls_reg_targets[
0] == self.student.roi_head.bbox_head.num_classes
# cls_reg_targets[1] is label_weights
cls_reg_targets[1][neg_inds] = bg_score[neg_inds].detach()
losses = self.student.roi_head.bbox_head.loss(
bbox_results['cls_score'], bbox_results['bbox_pred'], rois,
*cls_reg_targets)
# cls_reg_targets[1] is label_weights
losses['loss_cls'] = losses['loss_cls'] * len(
cls_reg_targets[1]) / max(sum(cls_reg_targets[1]), 1.0)
return losses
def rcnn_reg_loss_by_pseudo_instances(
self, x: Tuple[Tensor], unsup_rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> dict:
"""Calculate rcnn regression loss from a batch of inputs and pseudo
data samples.
Args:
x (tuple[Tensor]): List of multi-level img features.
unsup_rpn_results_list (list[:obj:`InstanceData`]):
List of region proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
Returns:
dict[str, Tensor]: A dictionary of rcnn
regression loss components
"""
rpn_results_list = copy.deepcopy(unsup_rpn_results_list)
reg_data_samples = copy.deepcopy(batch_data_samples)
for data_samples in reg_data_samples:
if data_samples.gt_instances.bboxes.shape[0] > 0:
data_samples.gt_instances = data_samples.gt_instances[
data_samples.gt_instances.reg_uncs <
self.semi_train_cfg.reg_pseudo_thr]
roi_losses = self.student.roi_head.loss(x, rpn_results_list,
reg_data_samples)
return {'loss_bbox': roi_losses['loss_bbox']}
def compute_uncertainty_with_aug(
self, x: Tuple[Tensor],
batch_data_samples: SampleList) -> List[Tensor]:
"""Compute uncertainty with augmented bboxes.
Args:
x (tuple[Tensor]): List of multi-level img features.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
Returns:
list[Tensor]: A list of uncertainty for pseudo bboxes.
"""
auged_results_list = self.aug_box(batch_data_samples,
self.semi_train_cfg.jitter_times,
self.semi_train_cfg.jitter_scale)
# flatten
auged_results_list = [
InstanceData(bboxes=auged.reshape(-1, auged.shape[-1]))
for auged in auged_results_list
]
self.teacher.roi_head.test_cfg = None
results_list = self.teacher.roi_head.predict(
x, auged_results_list, batch_data_samples, rescale=False)
self.teacher.roi_head.test_cfg = self.teacher.test_cfg.rcnn
reg_channel = max(
[results.bboxes.shape[-1] for results in results_list]) // 4
bboxes = [
results.bboxes.reshape(self.semi_train_cfg.jitter_times, -1,
results.bboxes.shape[-1])
if results.bboxes.numel() > 0 else results.bboxes.new_zeros(
self.semi_train_cfg.jitter_times, 0, 4 * reg_channel).float()
for results in results_list
]
box_unc = [bbox.std(dim=0) for bbox in bboxes]
bboxes = [bbox.mean(dim=0) for bbox in bboxes]
labels = [
data_samples.gt_instances.labels
for data_samples in batch_data_samples
]
if reg_channel != 1:
bboxes = [
bbox.reshape(bbox.shape[0], reg_channel,
4)[torch.arange(bbox.shape[0]), label]
for bbox, label in zip(bboxes, labels)
]
box_unc = [
unc.reshape(unc.shape[0], reg_channel,
4)[torch.arange(unc.shape[0]), label]
for unc, label in zip(box_unc, labels)
]
box_shape = [(bbox[:, 2:4] - bbox[:, :2]).clamp(min=1.0)
for bbox in bboxes]
box_unc = [
torch.mean(
unc / wh[:, None, :].expand(-1, 2, 2).reshape(-1, 4), dim=-1)
if wh.numel() > 0 else unc for unc, wh in zip(box_unc, box_shape)
]
return box_unc
@staticmethod
def aug_box(batch_data_samples, times, frac):
"""Augment bboxes with jitter."""
def _aug_single(box):
box_scale = box[:, 2:4] - box[:, :2]
box_scale = (
box_scale.clamp(min=1)[:, None, :].expand(-1, 2,
2).reshape(-1, 4))
aug_scale = box_scale * frac # [n,4]
offset = (
torch.randn(times, box.shape[0], 4, device=box.device) *
aug_scale[None, ...])
new_box = box.clone()[None, ...].expand(times, box.shape[0],
-1) + offset
return new_box
return [
_aug_single(data_samples.gt_instances.bboxes)
for data_samples in batch_data_samples
]
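# A short sketch (toy inputs, hypothetical helper) of the box-jittering idea in
# `aug_box` above: each pseudo box is perturbed `times` times with Gaussian
# noise whose scale is `frac` of the box width/height; the variance of the
# teacher's regression outputs over these jittered copies is then used as the
# localization uncertainty of the pseudo label.
def _toy_jitter(boxes: Tensor, times: int, frac: float) -> Tensor:
    wh = (boxes[:, 2:4] - boxes[:, :2]).clamp(min=1)
    # per-coordinate noise scale: (w, h, w, h) * frac
    scale = wh[:, None, :].expand(-1, 2, 2).reshape(-1, 4) * frac
    offset = torch.randn(times, boxes.shape[0], 4) * scale[None, ...]
    return boxes[None, ...].expand(times, -1, -1) + offset
_toy_boxes = torch.tensor([[10., 10., 30., 50.]])  # one 20 x 40 box
assert _toy_jitter(_toy_boxes, times=10, frac=0.06).shape == (10, 1, 4)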
| 16,860 | 43.488127 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/d2_wrapper.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import BaseBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks
from mmdet.utils import ConfigType
from .base import BaseDetector
try:
import detectron2
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from detectron2.structures.masks import BitMasks as D2_BitMasks
from detectron2.structures.masks import PolygonMasks as D2_PolygonMasks
from detectron2.utils.events import EventStorage
except ImportError:
detectron2 = None
def _to_cfgnode_list(cfg: ConfigType,
                     config_list: Optional[list] = None,
                     father_name: str = 'MODEL') -> tuple:
    """Convert the keys and values of a mmengine.ConfigDict into a flat list.
    Args:
        cfg (ConfigDict): The detectron2 model config.
        config_list (list, optional): A list collecting the keys and values
            of the ConfigDict. A fresh list is created when it is None.
            Defaults to None.
        father_name (str): The parent name prepended to each key.
            Defaults to "MODEL".
    Returns:
        tuple:
        - config_list: A list containing the keys and values of the
          ConfigDict.
        - father_name (str): The parent name prepended to the keys.
    """
    if config_list is None:
        config_list = []
    for key, value in cfg.items():
name = f'{father_name}.{key.upper()}'
if isinstance(value, ConfigDict) or isinstance(value, dict):
            config_list, _ = \
                _to_cfgnode_list(value, config_list, name)
else:
config_list.append(name)
config_list.append(value)
return config_list, father_name
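# A quick sketch of what the flattening above produces: nested config keys are
# upper-cased and joined with their parent name, giving the alternating
# key/value list expected by Detectron2's ``cfg.merge_from_list``. The config
# fragment below is hypothetical, not a complete Detectron2 model config.
_demo_cfg = ConfigDict(weights='', roi_heads=dict(num_classes=80))
_demo_list, _ = _to_cfgnode_list(_demo_cfg, config_list=[])
# -> ['MODEL.WEIGHTS', '', 'MODEL.ROI_HEADS.NUM_CLASSES', 80]
assert _demo_list[::2] == ['MODEL.WEIGHTS', 'MODEL.ROI_HEADS.NUM_CLASSES']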
def convert_d2_pred_to_datasample(data_samples: SampleList,
d2_results_list: list) -> SampleList:
"""Convert the Detectron2's result to DetDataSample.
Args:
data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
d2_results_list (list): The list of the results of Detectron2's model.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances'. And the ``pred_instances`` usually
contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
assert len(data_samples) == len(d2_results_list)
for data_sample, d2_results in zip(data_samples, d2_results_list):
d2_instance = d2_results['instances']
results = InstanceData()
results.bboxes = d2_instance.pred_boxes.tensor
results.scores = d2_instance.scores
results.labels = d2_instance.pred_classes
if d2_instance.has('pred_masks'):
results.masks = d2_instance.pred_masks
data_sample.pred_instances = results
return data_samples
@MODELS.register_module()
class Detectron2Wrapper(BaseDetector):
"""Wrapper of a Detectron2 model. Input/output formats of this class follow
MMDetection's convention, so a Detectron2 model can be trained and
evaluated in MMDetection.
Args:
detector (:obj:`ConfigDict` or dict): The module config of
Detectron2.
bgr_to_rgb (bool): whether to convert image from BGR to RGB.
Defaults to False.
rgb_to_bgr (bool): whether to convert image from RGB to BGR.
Defaults to False.
"""
def __init__(self,
detector: ConfigType,
bgr_to_rgb: bool = False,
rgb_to_bgr: bool = False) -> None:
if detectron2 is None:
raise ImportError('Please install Detectron2 first')
assert not (bgr_to_rgb and rgb_to_bgr), (
            '`bgr_to_rgb` and `rgb_to_bgr` cannot be set to True '
            'at the same time')
super().__init__()
self._channel_conversion = rgb_to_bgr or bgr_to_rgb
cfgnode_list, _ = _to_cfgnode_list(detector)
self.cfg = get_cfg()
self.cfg.merge_from_list(cfgnode_list)
self.d2_model = build_model(self.cfg)
self.storage = EventStorage()
def init_weights(self) -> None:
"""Initialization Backbone.
NOTE: The initialization of other layers are in Detectron2,
if users want to change the initialization way, please
change the code in Detectron2.
"""
from detectron2.checkpoint import DetectionCheckpointer
checkpointer = DetectionCheckpointer(model=self.d2_model)
checkpointer.load(self.cfg.MODEL.WEIGHTS, checkpointables=[])
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Union[dict, tuple]:
"""Calculate losses from a batch of inputs and data samples.
The inputs will first convert to the Detectron2 type and feed into
D2 models.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
d2_batched_inputs = self._convert_to_d2_inputs(
batch_inputs=batch_inputs,
batch_data_samples=batch_data_samples,
training=True)
with self.storage as storage: # noqa
losses = self.d2_model(d2_batched_inputs)
# storage contains some training information, such as cls_accuracy.
# you can use storage.latest() to get the detail information
return losses
def predict(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
The inputs will first convert to the Detectron2 type and feed into
D2 models. And the results will convert back to the MMDet type.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances'. And the ``pred_instances`` usually
contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
d2_batched_inputs = self._convert_to_d2_inputs(
batch_inputs=batch_inputs,
batch_data_samples=batch_data_samples,
training=False)
        # results from Detectron2 have already been rescaled
d2_results_list = self.d2_model(d2_batched_inputs)
batch_data_samples = convert_d2_pred_to_datasample(
data_samples=batch_data_samples, d2_results_list=d2_results_list)
return batch_data_samples
def _forward(self, *args, **kwargs):
"""Network forward process.
Usually includes backbone, neck and head forward without any post-
processing.
"""
raise NotImplementedError(
f'`_forward` is not implemented in {self.__class__.__name__}')
def extract_feat(self, *args, **kwargs):
"""Extract features from images.
        `extract_feat` will not be used in :obj:`Detectron2Wrapper`.
"""
pass
def _convert_to_d2_inputs(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
training=True) -> list:
"""Convert inputs type to support Detectron2's model.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
training (bool): Whether to enable training time processing.
Returns:
list[dict]: A list of dict, which will be fed into Detectron2's
model. And the dict usually contains following keys.
- image (Tensor): Image in (C, H, W) format.
- instances (Instances): GT Instance.
- height (int): the output height resolution of the model
- width (int): the output width resolution of the model
"""
from detectron2.data.detection_utils import filter_empty_instances
from detectron2.structures import Boxes, Instances
batched_d2_inputs = []
for image, data_samples in zip(batch_inputs, batch_data_samples):
d2_inputs = dict()
# deal with metainfo
meta_info = data_samples.metainfo
d2_inputs['file_name'] = meta_info['img_path']
d2_inputs['height'], d2_inputs['width'] = meta_info['ori_shape']
d2_inputs['image_id'] = meta_info['img_id']
# deal with image
if self._channel_conversion:
image = image[[2, 1, 0], ...]
d2_inputs['image'] = image
# deal with gt_instances
gt_instances = data_samples.gt_instances
d2_instances = Instances(meta_info['img_shape'])
gt_boxes = gt_instances.bboxes
# TODO: use mmdet.structures.box.get_box_tensor after PR 8658
# has merged
if isinstance(gt_boxes, BaseBoxes):
gt_boxes = gt_boxes.tensor
d2_instances.gt_boxes = Boxes(gt_boxes)
d2_instances.gt_classes = gt_instances.labels
if gt_instances.get('masks', None) is not None:
gt_masks = gt_instances.masks
if isinstance(gt_masks, PolygonMasks):
d2_instances.gt_masks = D2_PolygonMasks(gt_masks.masks)
elif isinstance(gt_masks, BitmapMasks):
d2_instances.gt_masks = D2_BitMasks(gt_masks.masks)
else:
raise TypeError('The type of `gt_mask` can be '
'`PolygonMasks` or `BitMasks`, but get '
f'{type(gt_masks)}.')
# convert to cpu and convert back to cuda to avoid
# some potential error
if training:
device = gt_boxes.device
d2_instances = filter_empty_instances(
d2_instances.to('cpu')).to(device)
d2_inputs['instances'] = d2_instances
batched_d2_inputs.append(d2_inputs)
return batched_d2_inputs
| 11,772 | 39.318493 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/paa.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of PAA. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of PAA. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,679 | 39 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/faster_rcnn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
data_preprocessor=data_preprocessor)
| 999 | 33.482759 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/grid_rcnn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class GridRCNN(TwoStageDetector):
"""Grid R-CNN.
This detector is the implementation of:
- Grid R-CNN (https://arxiv.org/abs/1811.12030)
- Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,118 | 31.911765 | 75 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/rtmdet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.dist import get_world_size
from mmengine.logging import print_log
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RTMDet(SingleStageDetector):
"""Implementation of RTMDet.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of RTMDet. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of RTMDet. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
use_syncbn (bool): Whether to use SyncBatchNorm. Defaults to True.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
use_syncbn: bool = True) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
# TODO: Waiting for mmengine support
if use_syncbn and get_world_size() > 1:
torch.nn.SyncBatchNorm.convert_sync_batchnorm(self)
print_log('Using SyncBatchNorm()', 'current')
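# A small sketch (toy model) of what ``convert_sync_batchnorm`` above does: it
# walks the module tree and swaps every BatchNorm layer for a SyncBatchNorm
# layer, which only changes behaviour during multi-GPU training (hence the
# ``get_world_size() > 1`` guard).
_toy_net = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))
_toy_net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(_toy_net)
assert isinstance(_toy_net[1], torch.nn.SyncBatchNorm)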
| 2,073 | 38.132075 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/gfl_increment_erd.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from collections import OrderedDict
from typing import List, Union
import torch
from mmengine import Config
from mmengine.runner.checkpoint import load_checkpoint, load_state_dict
from torch import Tensor
from ..utils import multi_apply
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .gfl import GFL
@MODELS.register_module()
class GFLIncrementERD(GFL):
"""Implementation of `GFL <https://arxiv.org/abs/2006.04388>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of GFL. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of GFL. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
ori_setting: ConfigType,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
latest_model_flag=True,
top_k=100,
dist_loss_weight=1,
) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
self.top_k = top_k
self.dist_loss_weight = dist_loss_weight
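        # `latest_model_flag` marks this instance as the student; the frozen
        # base (teacher) detector built inside `load_base_detector` is created
        # with the flag disabled so that it does not recursively load yet
        # another base detector.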
if latest_model_flag:
self.load_base_detector(ori_setting)
self._is_init = True
def _load_checkpoint_for_new_model(self, checkpoint_file, map_location=None, strict=True, logger=None):
# load ckpt
checkpoint = torch.load(checkpoint_file, map_location=map_location)
# get state_dict from checkpoint
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(checkpoint_file))
# strip prefix of state_dict
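        # (checkpoints saved from a DistributedDataParallel-wrapped model
        # prefix every parameter name with 'module.')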
if list(state_dict.keys())[0].startswith('module.'):
            state_dict = {k[7:]: v for k, v in state_dict.items()}
# modify cls head size of state_dict
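        # The new `gfl_cls` layer predicts old + new classes: rows for the old
        # classes are copied from the base checkpoint below, while the rows
        # appended here for the new classes keep this model's fresh
        # initialization.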
added_branch_weight = self.bbox_head.gfl_cls.weight[self.ori_num_classes:, ...]
added_branch_bias = self.bbox_head.gfl_cls.bias[self.ori_num_classes:, ...]
state_dict['bbox_head.gfl_cls.weight'] = torch.cat(
(state_dict['bbox_head.gfl_cls.weight'], added_branch_weight), dim=0)
state_dict['bbox_head.gfl_cls.bias'] = torch.cat(
(state_dict['bbox_head.gfl_cls.bias'], added_branch_bias), dim=0)
# load state_dict
if hasattr(self, 'module'):
load_state_dict(self.module, state_dict, strict, logger)
else:
load_state_dict(self, state_dict, strict, logger)
def load_base_detector(self, ori_setting):
"""
Initialize detector from config file.
:param ori_setting:
:return:
"""
assert os.path.isfile(ori_setting['ori_checkpoint_file']), '{} is not a valid file'.format(
ori_setting['ori_checkpoint_file'])
        ##### init original model & freeze it #####
# build model
ori_cfg = Config.fromfile(ori_setting['ori_config_file'])
if hasattr(ori_cfg.model, 'latest_model_flag'):
ori_cfg.model.latest_model_flag = False
ori_model = MODELS.build(ori_cfg.model)
# load checkpoint
load_checkpoint(ori_model, ori_setting.ori_checkpoint_file, strict=True)
        # set to eval mode
ori_model.eval()
# ori_model.forward = ori_model.forward_dummy
        # set requires_grad of all parameters to False
for param in ori_model.parameters():
param.requires_grad = False
        ##### init original branches of new model #####
self.ori_num_classes = ori_setting.ori_num_classes
self._load_checkpoint_for_new_model(ori_setting.ori_checkpoint_file)
print('======> load base checkpoint for new model from {}'.format(ori_setting.ori_checkpoint_file))
self.ori_model = ori_model
def forward_ori_model(self, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
img (Tensor): Input to the model.
Returns:
outs (Tuple(List[Tensor])): Three model outputs.
# cls_scores (List[Tensor]): Classification scores for each FPN level.
# bbox_preds (List[Tensor]): BBox predictions for each FPN level.
# centernesses (List[Tensor]): Centernesses predictions for each FPN level.
"""
# forward the model without gradients
with torch.no_grad():
outs = self.ori_model(img)
return outs
def sel_pos_single(self, cat_cls_scores, cat_bbox_preds):
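        # Per-image response selection: instead of a fixed top-k, keep the
        # locations whose strongest response lies more than two standard
        # deviations above the mean, separately for the classification scores
        # and for the bbox distribution logits.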
        # select classification responses whose max score exceeds mean + 2*std
cat_conf = cat_cls_scores.sigmoid()
max_scores, _ = cat_conf.max(dim=-1)
cls_thr = max_scores.mean() + 2 * max_scores.std()
valid_mask = max_scores > cls_thr
topk_cls_inds = valid_mask.nonzero(as_tuple=False).squeeze(1)
topk_cls_scores = cat_cls_scores.gather( # shape:(N,dim)
0, topk_cls_inds.unsqueeze(-1).expand(-1, cat_cls_scores.size(-1)))
        # select regression responses whose max distribution logit exceeds mean + 2*std
max_bbox, _ = cat_bbox_preds.max(dim=-1)
bbox_thr = max_bbox.mean() + 2 * max_bbox.std()
bbox_valid_mask = max_bbox > bbox_thr
topk_bbox_inds = bbox_valid_mask.nonzero(as_tuple=False).squeeze(1)
topk_bbox_preds = cat_bbox_preds.gather(
0, topk_bbox_inds.unsqueeze(-1).expand(-1, cat_bbox_preds.size(-1)))
return topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds
def sel_pos(self, cls_scores, bbox_preds):
"""Select positive predictions based on classification scores.
Args:
model (nn.Module): The loaded detector.
cls_scores (List[Tensor]): Classification scores for each FPN level.
bbox_preds (List[Tensor]): BBox predictions for each FPN level.
#centernesses (List[Tensor]): Centernesses predictions for each FPN level.
Returns:
cat_cls_scores (Tensor): FPN concatenated classification scores.
#cat_centernesses (Tensor): FPN concatenated centernesses.
topk_bbox_preds (Tensor): Selected top-k bbox predictions.
topk_inds (Tensor): Selected top-k indices.
"""
assert len(cls_scores) == len(bbox_preds)
num_imgs = cls_scores[0].size(0)
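        # Flatten every FPN level from (N, C, H, W) to (N, H*W*num_priors, C')
        # so that all levels can be concatenated along the location dimension.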
cat_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(
num_imgs, -1, self.ori_model.bbox_head.cls_out_channels)
for cls_score in cls_scores
]
cat_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4 * (self.ori_model.bbox_head.reg_max + 1))  # 4 * (reg_max + 1) distribution bins (plain bbox regression would use 4)
for bbox_pred in bbox_preds
]
cat_cls_scores = torch.cat(cat_cls_scores, dim=1)
cat_bbox_preds = torch.cat(cat_bbox_preds, dim=1)
topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds = multi_apply(
self.sel_pos_single,
cat_cls_scores,
cat_bbox_preds)
return topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds
def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> Union[dict, list]:
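        # ERD training step: run the frozen base detector on the same batch,
        # pick its confident responses (see `sel_pos`), run this model, and
        # let the head combine the regular detection losses on the new-class
        # annotations with distillation losses on the selected responses,
        # weighted by `dist_loss_weight`.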
# get original model outputs
ori_outs = self.ori_model(batch_inputs)
# select positive predictions from original model
topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds = self.sel_pos(*ori_outs)
# get new model outputs
x = self.extract_feat(batch_inputs)
new_outs = self.bbox_head(x)
# calculate losses including general losses of new model and distillation losses of original model
        loss_inputs = (ori_outs, new_outs, batch_data_samples,
topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds,
self.ori_num_classes, self.dist_loss_weight, self)
losses = self.bbox_head.loss(*loss_inputs)
return losses
| 9,276 | 40.977376 | 116 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/yolact.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class YOLACT(SingleStageInstanceSegmentor):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
mask_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,045 | 35.068966 | 72 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/base_detr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList, SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .base import BaseDetector
@MODELS.register_module()
class DetectionTransformer(BaseDetector, metaclass=ABCMeta):
r"""Base class for Detection Transformer.
In Detection Transformer, an encoder is used to process output features of
neck, then several queries interact with the encoder features using a
decoder and do the regression and classification with the bounding box
head.
Args:
backbone (:obj:`ConfigDict` or dict): Config of the backbone.
neck (:obj:`ConfigDict` or dict, optional): Config of the neck.
Defaults to None.
encoder (:obj:`ConfigDict` or dict, optional): Config of the
Transformer encoder. Defaults to None.
decoder (:obj:`ConfigDict` or dict, optional): Config of the
Transformer decoder. Defaults to None.
bbox_head (:obj:`ConfigDict` or dict, optional): Config for the
bounding box head module. Defaults to None.
positional_encoding (:obj:`ConfigDict` or dict, optional): Config
of the positional encoding module. Defaults to None.
num_queries (int, optional): Number of decoder query in Transformer.
Defaults to 100.
train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
the bounding box head module. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
the bounding box head module. Defaults to None.
data_preprocessor (dict or ConfigDict, optional): The pre-process
config of :class:`BaseDataPreprocessor`. it usually includes,
``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
encoder: OptConfigType = None,
decoder: OptConfigType = None,
bbox_head: OptConfigType = None,
positional_encoding: OptConfigType = None,
num_queries: int = 100,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
# process args
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.encoder = encoder
self.decoder = decoder
self.positional_encoding = positional_encoding
self.num_queries = num_queries
# init model layers
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
self.bbox_head = MODELS.build(bbox_head)
self._init_layers()
@abstractmethod
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
pass
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Union[dict, list]:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (bs, dim, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components
"""
img_feats = self.extract_feat(batch_inputs)
head_inputs_dict = self.forward_transformer(img_feats,
batch_data_samples)
losses = self.bbox_head.loss(
**head_inputs_dict, batch_data_samples=batch_data_samples)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs, has shape (bs, dim, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
list[:obj:`DetDataSample`]: Detection results of the input images.
Each DetDataSample usually contain 'pred_instances'. And the
`pred_instances` usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
img_feats = self.extract_feat(batch_inputs)
head_inputs_dict = self.forward_transformer(img_feats,
batch_data_samples)
results_list = self.bbox_head.predict(
**head_inputs_dict,
rescale=rescale,
batch_data_samples=batch_data_samples)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
def _forward(
self,
batch_inputs: Tensor,
batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs, has shape (bs, dim, H, W).
batch_data_samples (List[:obj:`DetDataSample`], optional): The
batch data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
tuple[Tensor]: A tuple of features from ``bbox_head`` forward.
"""
img_feats = self.extract_feat(batch_inputs)
head_inputs_dict = self.forward_transformer(img_feats,
batch_data_samples)
results = self.bbox_head.forward(**head_inputs_dict)
return results
def forward_transformer(self,
img_feats: Tuple[Tensor],
batch_data_samples: OptSampleList = None) -> Dict:
"""Forward process of Transformer, which includes four steps:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'. We
summarized the parameters flow of the existing DETR-like detector,
which can be illustrated as follow:
.. code:: text
img_feats & batch_data_samples
|
V
+-----------------+
| pre_transformer |
+-----------------+
| |
| V
| +-----------------+
| | forward_encoder |
| +-----------------+
| |
| V
| +---------------+
| | pre_decoder |
| +---------------+
| | |
V V |
+-----------------+ |
| forward_decoder | |
+-----------------+ |
| |
V V
head_inputs_dict
Args:
img_feats (tuple[Tensor]): Tuple of feature maps from neck. Each
feature map has shape (bs, dim, H, W).
batch_data_samples (list[:obj:`DetDataSample`], optional): The
batch data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
dict: The dictionary of bbox_head function inputs, which always
includes the `hidden_states` of the decoder output and may contain
`references` including the initial and intermediate references.
"""
encoder_inputs_dict, decoder_inputs_dict = self.pre_transformer(
img_feats, batch_data_samples)
encoder_outputs_dict = self.forward_encoder(**encoder_inputs_dict)
tmp_dec_in, head_inputs_dict = self.pre_decoder(**encoder_outputs_dict)
decoder_inputs_dict.update(tmp_dec_in)
decoder_outputs_dict = self.forward_decoder(**decoder_inputs_dict)
head_inputs_dict.update(decoder_outputs_dict)
return head_inputs_dict
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor, has shape (bs, dim, H, W).
Returns:
tuple[Tensor]: Tuple of feature maps from neck. Each feature map
has shape (bs, dim, H, W).
"""
x = self.backbone(batch_inputs)
if self.with_neck:
x = self.neck(x)
return x
@abstractmethod
def pre_transformer(
self,
img_feats: Tuple[Tensor],
batch_data_samples: OptSampleList = None) -> Tuple[Dict, Dict]:
"""Process image features before feeding them to the transformer.
Args:
img_feats (tuple[Tensor]): Tuple of feature maps from neck. Each
feature map has shape (bs, dim, H, W).
batch_data_samples (list[:obj:`DetDataSample`], optional): The
batch data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
tuple[dict, dict]: The first dict contains the inputs of encoder
and the second dict contains the inputs of decoder.
- encoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_encoder()`, which includes 'feat', 'feat_mask',
'feat_pos', and other algorithm-specific arguments.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'memory_mask', and
other algorithm-specific arguments.
"""
pass
@abstractmethod
def forward_encoder(self, feat: Tensor, feat_mask: Tensor,
feat_pos: Tensor, **kwargs) -> Dict:
"""Forward with Transformer encoder.
Args:
feat (Tensor): Sequential features, has shape (bs, num_feat_points,
dim).
feat_mask (Tensor): ByteTensor, the padding mask of the features,
has shape (bs, num_feat_points).
feat_pos (Tensor): The positional embeddings of the features, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of encoder outputs, which includes the
`memory` of the encoder output and other algorithm-specific
arguments.
"""
pass
@abstractmethod
def pre_decoder(self, memory: Tensor, **kwargs) -> Tuple[Dict, Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`, and `reference_points`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
Returns:
tuple[dict, dict]: The first dict contains the inputs of decoder
and the second dict contains the inputs of the bbox_head function.
- decoder_inputs_dict (dict): The keyword dictionary args of
`self.forward_decoder()`, which includes 'query', 'query_pos',
'memory', and other algorithm-specific arguments.
- head_inputs_dict (dict): The keyword dictionary args of the
bbox_head functions, which is usually empty, or includes
              `enc_outputs_class` and `enc_outputs_coord` when the detector
              supports 'two stage' or 'query selection' strategies.
"""
pass
@abstractmethod
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
**kwargs) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` of the decoder output, `references` including
the initial and intermediate reference_points, and other
algorithm-specific arguments.
"""
pass
| 14,268 | 41.84985 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/rpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RPN(SingleStageDetector):
"""Implementation of Region Proposal Network.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
rpn_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
super(SingleStageDetector, self).__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
self.neck = MODELS.build(neck) if neck is not None else None
rpn_train_cfg = train_cfg['rpn'] if train_cfg is not None else None
rpn_head_num_classes = rpn_head.get('num_classes', 1)
if rpn_head_num_classes != 1:
warnings.warn('The `num_classes` should be 1 in RPN, but get '
f'{rpn_head_num_classes}, please set '
'rpn_head.num_classes = 1 in your config file.')
rpn_head.update(num_classes=1)
rpn_head.update(train_cfg=rpn_train_cfg)
rpn_head.update(test_cfg=test_cfg['rpn'])
self.bbox_head = MODELS.build(rpn_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
# set cat_id of gt_labels to 0 in RPN
rpn_data_samples = copy.deepcopy(batch_data_samples)
for data_sample in rpn_data_samples:
data_sample.gt_instances.labels = \
torch.zeros_like(data_sample.gt_instances.labels)
losses = self.bbox_head.loss(x, rpn_data_samples)
return losses
| 3,380 | 40.231707 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/dino.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Tuple
import torch
from torch import Tensor, nn
from torch.nn.init import normal_
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList
from mmdet.utils import OptConfigType
from ..layers import (CdnQueryGenerator, DeformableDetrTransformerEncoder,
DinoTransformerDecoder, SinePositionalEncoding)
from .deformable_detr import DeformableDETR, MultiScaleDeformableAttention
@MODELS.register_module()
class DINO(DeformableDETR):
r"""Implementation of `DINO: DETR with Improved DeNoising Anchor Boxes
for End-to-End Object Detection <https://arxiv.org/abs/2203.03605>`_
Code is modified from the `official github repo
<https://github.com/IDEA-Research/DINO>`_.
Args:
dn_cfg (:obj:`ConfigDict` or dict, optional): Config of denoising
query generator. Defaults to `None`.
"""
def __init__(self, *args, dn_cfg: OptConfigType = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
assert self.as_two_stage, 'as_two_stage must be True for DINO'
assert self.with_box_refine, 'with_box_refine must be True for DINO'
if dn_cfg is not None:
assert 'num_classes' not in dn_cfg and \
                   'embed_dims' not in dn_cfg and \
                   'num_matching_queries' not in dn_cfg, \
'The three keyword args `num_classes`, `embed_dims`, and ' \
'`num_matching_queries` are set in `detector.__init__()`, ' \
'users should not set them in `dn_cfg` config.'
dn_cfg['num_classes'] = self.bbox_head.num_classes
dn_cfg['embed_dims'] = self.embed_dims
dn_cfg['num_matching_queries'] = self.num_queries
self.dn_query_generator = CdnQueryGenerator(**dn_cfg)
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DeformableDetrTransformerEncoder(**self.encoder)
self.decoder = DinoTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)
# NOTE In DINO, the query_embedding only contains content
# queries, while in Deformable DETR, the query_embedding
# contains both content and spatial queries, and in DETR,
# it only contains spatial queries.
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
f'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
self.level_embed = nn.Parameter(
torch.Tensor(self.num_feature_levels, self.embed_dims))
self.memory_trans_fc = nn.Linear(self.embed_dims, self.embed_dims)
self.memory_trans_norm = nn.LayerNorm(self.embed_dims)
def init_weights(self) -> None:
"""Initialize weights for Transformer and other components."""
super(DeformableDETR, self).init_weights()
for coder in self.encoder, self.decoder:
for p in coder.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MultiScaleDeformableAttention):
m.init_weights()
nn.init.xavier_uniform_(self.memory_trans_fc.weight)
nn.init.xavier_uniform_(self.query_embedding.weight)
normal_(self.level_embed)
def forward_transformer(
self,
img_feats: Tuple[Tensor],
batch_data_samples: OptSampleList = None,
) -> Dict:
"""Forward process of Transformer.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
The difference is that the ground truth in `batch_data_samples` is
required for the `pre_decoder` to prepare the query of DINO.
Additionally, DINO inherits the `pre_transformer` method and the
`forward_encoder` method of DeformableDETR. More details about the
two methods can be found in `mmdet/detector/deformable_detr.py`.
Args:
img_feats (tuple[Tensor]): Tuple of feature maps from neck. Each
feature map has shape (bs, dim, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
dict: The dictionary of bbox_head function inputs, which always
includes the `hidden_states` of the decoder output and may contain
`references` including the initial and intermediate references.
"""
encoder_inputs_dict, decoder_inputs_dict = self.pre_transformer(
img_feats, batch_data_samples)
encoder_outputs_dict = self.forward_encoder(**encoder_inputs_dict)
tmp_dec_in, head_inputs_dict = self.pre_decoder(
**encoder_outputs_dict, batch_data_samples=batch_data_samples)
decoder_inputs_dict.update(tmp_dec_in)
decoder_outputs_dict = self.forward_decoder(**decoder_inputs_dict)
head_inputs_dict.update(decoder_outputs_dict)
return head_inputs_dict
def pre_decoder(
self,
memory: Tensor,
memory_mask: Tensor,
spatial_shapes: Tensor,
batch_data_samples: OptSampleList = None,
) -> Tuple[Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`, and `reference_points`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points). Will only be used when
`as_two_stage` is `True`.
spatial_shapes (Tensor): Spatial shapes of features in all levels.
With shape (num_levels, 2), last dimension represents (h, w).
Will only be used when `as_two_stage` is `True`.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
tuple[dict]: The decoder_inputs_dict and head_inputs_dict.
- decoder_inputs_dict (dict): The keyword dictionary args of
`self.forward_decoder()`, which includes 'query', 'memory',
`reference_points`, and `dn_mask`. The reference points of
decoder input here are 4D boxes, although it has `points`
in its name.
- head_inputs_dict (dict): The keyword dictionary args of the
bbox_head functions, which includes `topk_score`, `topk_coords`,
and `dn_meta` when `self.training` is `True`, else is empty.
"""
bs, _, c = memory.shape
cls_out_features = self.bbox_head.cls_branches[
self.decoder.num_layers].out_features
output_memory, output_proposals = self.gen_encoder_output_proposals(
memory, memory_mask, spatial_shapes)
enc_outputs_class = self.bbox_head.cls_branches[
self.decoder.num_layers](
output_memory)
enc_outputs_coord_unact = self.bbox_head.reg_branches[
self.decoder.num_layers](output_memory) + output_proposals
# NOTE The DINO selects top-k proposals according to scores of
# multi-class classification, while DeformDETR, where the input
# is `enc_outputs_class[..., 0]` selects according to scores of
# binary classification.
topk_indices = torch.topk(
enc_outputs_class.max(-1)[0], k=self.num_queries, dim=1)[1]
topk_score = torch.gather(
enc_outputs_class, 1,
topk_indices.unsqueeze(-1).repeat(1, 1, cls_out_features))
topk_coords_unact = torch.gather(
enc_outputs_coord_unact, 1,
topk_indices.unsqueeze(-1).repeat(1, 1, 4))
topk_coords = topk_coords_unact.sigmoid()
topk_coords_unact = topk_coords_unact.detach()
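        # The detached top-k proposal coordinates serve as the decoder's
        # initial reference points, so decoder gradients do not flow back
        # through the encoder's proposal branch; `topk_coords` (computed
        # before the detach) is returned separately for the encoder
        # auxiliary loss.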
query = self.query_embedding.weight[:, None, :]
query = query.repeat(1, bs, 1).transpose(0, 1)
if self.training:
dn_label_query, dn_bbox_query, dn_mask, dn_meta = \
self.dn_query_generator(batch_data_samples)
query = torch.cat([dn_label_query, query], dim=1)
reference_points = torch.cat([dn_bbox_query, topk_coords_unact],
dim=1)
else:
reference_points = topk_coords_unact
dn_mask, dn_meta = None, None
reference_points = reference_points.sigmoid()
decoder_inputs_dict = dict(
query=query,
memory=memory,
reference_points=reference_points,
dn_mask=dn_mask)
# NOTE DINO calculates encoder losses on scores and coordinates
# of selected top-k encoder queries, while DeformDETR is of all
# encoder queries.
head_inputs_dict = dict(
enc_outputs_class=topk_score,
enc_outputs_coord=topk_coords,
dn_meta=dn_meta) if self.training else dict()
return decoder_inputs_dict, head_inputs_dict
def forward_decoder(self,
query: Tensor,
memory: Tensor,
memory_mask: Tensor,
reference_points: Tensor,
spatial_shapes: Tensor,
level_start_index: Tensor,
valid_ratios: Tensor,
dn_mask: Optional[Tensor] = None) -> Dict:
"""Forward with Transformer decoder.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries_total, dim), where `num_queries_total` is the
sum of `num_denoising_queries` and `num_matching_queries` when
`self.training` is `True`, else `num_matching_queries`.
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
reference_points (Tensor): The initial reference, has shape
(bs, num_queries_total, 4) with the last dimension arranged as
(cx, cy, w, h).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
level_start_index (Tensor): The start index of each level.
A tensor has shape (num_levels, ) and can be represented
as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
dn_mask (Tensor, optional): The attention mask to prevent
information leakage from different denoising groups and
matching parts, will be used as `self_attn_mask` of the
`self.decoder`, has shape (num_queries_total,
num_queries_total).
It is `None` when `self.training` is `False`.
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` of the decoder output and `references` including
the initial and intermediate reference_points.
"""
inter_states, references = self.decoder(
query=query,
value=memory,
key_padding_mask=memory_mask,
self_attn_mask=dn_mask,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
reg_branches=self.bbox_head.reg_branches)
if len(query) == self.num_queries:
            # NOTE: This is to make sure label_embedding can be involved to
# produce loss even if there is no denoising query (no ground truth
# target in this GPU), otherwise, this will raise runtime error in
# distributed training.
inter_states[0] += \
self.dn_query_generator.label_embedding.weight[0, 0] * 0.0
decoder_outputs_dict = dict(
hidden_states=inter_states, references=list(references))
return decoder_outputs_dict
| 13,511 | 46.244755 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/trident_faster_rcnn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .faster_rcnn import FasterRCNN
@MODELS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super()._forward(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().loss(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().predict(
batch_inputs=batch_inputs,
batch_data_samples=trident_data_samples,
rescale=rescale)
# TODO need to refactor
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        trident_img_metas = [img_meta * num_branch for img_meta in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
| 3,358 | 39.963415 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/retinanet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 947 | 34.111111 | 75 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/point_rend.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone: ConfigDict,
rpn_head: ConfigDict,
roi_head: ConfigDict,
train_cfg: ConfigDict,
test_cfg: ConfigDict,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
data_preprocessor=data_preprocessor)
| 1,103 | 29.666667 | 58 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/queryinst.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .sparse_rcnn import SparseRCNN
@MODELS.register_module()
class QueryInst(SparseRCNN):
r"""Implementation of
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,000 | 32.366667 | 65 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/atss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class ATSS(SingleStageDetector):
"""Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of ATSS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of ATSS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,679 | 39 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/panoptic_two_stage_segmentor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import List
import torch
from mmengine.structures import PixelData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class TwoStagePanopticSegmentor(TwoStageDetector):
"""Base class of Two-stage Panoptic Segmentor.
    In addition to the components of TwoStageDetector, a panoptic segmentor has
    an extra semantic_head and a panoptic_fusion_head.
"""
def __init__(
self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
# for panoptic segmentation
semantic_head: OptConfigType = None,
panoptic_fusion_head: OptConfigType = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
if semantic_head is not None:
self.semantic_head = MODELS.build(semantic_head)
if panoptic_fusion_head is not None:
panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None
panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()
panoptic_fusion_head_.update(test_cfg=panoptic_cfg)
self.panoptic_fusion_head = MODELS.build(panoptic_fusion_head_)
self.num_things_classes = self.panoptic_fusion_head.\
num_things_classes
self.num_stuff_classes = self.panoptic_fusion_head.\
num_stuff_classes
self.num_classes = self.panoptic_fusion_head.num_classes
@property
def with_semantic_head(self) -> bool:
"""bool: whether the detector has semantic head"""
return hasattr(self,
'semantic_head') and self.semantic_head is not None
@property
def with_panoptic_fusion_head(self) -> bool:
"""bool: whether the detector has panoptic fusion head"""
return hasattr(self, 'panoptic_fusion_head') and \
self.panoptic_fusion_head is not None
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
losses = dict()
# RPN forward and loss
if self.with_rpn:
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
rpn_data_samples = copy.deepcopy(batch_data_samples)
# set cat_id of gt_labels to 0 in RPN
for data_sample in rpn_data_samples:
data_sample.gt_instances.labels = \
torch.zeros_like(data_sample.gt_instances.labels)
rpn_losses, rpn_results_list = self.rpn_head.loss_and_predict(
x, rpn_data_samples, proposal_cfg=proposal_cfg)
# avoid get same name with roi_head loss
keys = rpn_losses.keys()
for key in list(keys):
if 'loss' in key and 'rpn' not in key:
rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)
losses.update(rpn_losses)
else:
# TODO: Not support currently, should have a check at Fast R-CNN
assert batch_data_samples[0].get('proposals', None) is not None
# use pre-defined proposals in InstanceData for the second stage
# to extract ROI features.
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
roi_losses = self.roi_head.loss(x, rpn_results_list,
batch_data_samples)
losses.update(roi_losses)
semantic_loss = self.semantic_head.loss(x, batch_data_samples)
losses.update(semantic_loss)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
List[:obj:`DetDataSample`]: Return the packed panoptic segmentation
results of input images. Each DetDataSample usually contains
'pred_panoptic_seg'. And the 'pred_panoptic_seg' has a key
``sem_seg``, which is a tensor of shape (1, h, w).
"""
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
x = self.extract_feat(batch_inputs)
# If there are no pre-defined proposals, use RPN to get proposals
if batch_data_samples[0].get('proposals', None) is None:
rpn_results_list = self.rpn_head.predict(
x, batch_data_samples, rescale=False)
else:
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
results_list = self.roi_head.predict(
x, rpn_results_list, batch_data_samples, rescale=rescale)
seg_preds = self.semantic_head.predict(x, batch_img_metas, rescale)
results_list = self.panoptic_fusion_head.predict(
results_list, seg_preds)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
# TODO the code has not been verified and needs to be refactored later.
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
Returns:
tuple: A tuple of features from ``rpn_head``, ``roi_head`` and
``semantic_head`` forward.
"""
results = ()
x = self.extract_feat(batch_inputs)
rpn_outs = self.rpn_head.forward(x)
        results = results + (rpn_outs, )
# If there are no pre-defined proposals, use RPN to get proposals
if batch_data_samples[0].get('proposals', None) is None:
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
rpn_results_list = self.rpn_head.predict_by_feat(
*rpn_outs, batch_img_metas=batch_img_metas, rescale=False)
else:
# TODO: Not checked currently.
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
# roi_head
roi_outs = self.roi_head(x, rpn_results_list)
        results = results + (roi_outs, )
# semantic_head
sem_outs = self.semantic_head.forward(x)
results = results + (sem_outs['seg_preds'], )
return results
def add_pred_to_datasample(self, data_samples: SampleList,
results_list: List[PixelData]) -> SampleList:
"""Add predictions to `DetDataSample`.
Args:
data_samples (list[:obj:`DetDataSample`]): The
annotation data of every samples.
results_list (List[PixelData]): Panoptic segmentation results of
each image.
Returns:
List[:obj:`DetDataSample`]: Return the packed panoptic segmentation
results of input images. Each DetDataSample usually contains
'pred_panoptic_seg'. And the 'pred_panoptic_seg' has a key
``sem_seg``, which is a tensor of shape (1, h, w).
"""
for data_sample, pred_panoptic_seg in zip(data_samples, results_list):
data_sample.pred_panoptic_seg = pred_panoptic_seg
return data_samples
| 9,188 | 38.102128 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/centernet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CenterNet(SingleStageDetector):
"""Implementation of CenterNet(Objects as Points)
<https://arxiv.org/abs/1904.07850>.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 974 | 31.5 | 65 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/maskformer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Tuple
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class MaskFormer(SingleStageDetector):
r"""Implementation of `Per-Pixel Classification is
NOT All You Need for Semantic Segmentation
<https://arxiv.org/pdf/2107.06278>`_."""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
panoptic_head: OptConfigType = None,
panoptic_fusion_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super(SingleStageDetector, self).__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
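        # Deep-copy the head configs before injecting train/test cfg so that
        # the config dicts passed in by the caller are left untouched.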
panoptic_head_ = panoptic_head.deepcopy()
panoptic_head_.update(train_cfg=train_cfg)
panoptic_head_.update(test_cfg=test_cfg)
self.panoptic_head = MODELS.build(panoptic_head_)
panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()
panoptic_fusion_head_.update(test_cfg=test_cfg)
self.panoptic_fusion_head = MODELS.build(panoptic_fusion_head_)
self.num_things_classes = self.panoptic_head.num_things_classes
self.num_stuff_classes = self.panoptic_head.num_stuff_classes
self.num_classes = self.panoptic_head.num_classes
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(batch_inputs)
losses = self.panoptic_head.loss(x, batch_data_samples)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances' and `pred_panoptic_seg`. And the
``pred_instances`` usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
And the ``pred_panoptic_seg`` contains the following key
- sem_seg (Tensor): panoptic segmentation mask, has a
shape (1, h, w).
"""
feats = self.extract_feat(batch_inputs)
mask_cls_results, mask_pred_results = self.panoptic_head.predict(
feats, batch_data_samples)
results_list = self.panoptic_fusion_head.predict(
mask_cls_results,
mask_pred_results,
batch_data_samples,
rescale=rescale)
results = self.add_pred_to_datasample(batch_data_samples, results_list)
return results
def add_pred_to_datasample(self, data_samples: SampleList,
results_list: List[dict]) -> SampleList:
"""Add predictions to `DetDataSample`.
Args:
data_samples (list[:obj:`DetDataSample`], optional): A batch of
data samples that contain annotations and predictions.
            results_list (List[dict]): Instance segmentation, semantic
segmentation and panoptic segmentation results.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances' and `pred_panoptic_seg`. And the
``pred_instances`` usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
And the ``pred_panoptic_seg`` contains the following key
- sem_seg (Tensor): panoptic segmentation mask, has a
shape (1, h, w).
"""
for data_sample, pred_results in zip(data_samples, results_list):
if 'pan_results' in pred_results:
data_sample.pred_panoptic_seg = pred_results['pan_results']
if 'ins_results' in pred_results:
data_sample.pred_instances = pred_results['ins_results']
            assert 'sem_results' not in pred_results, 'semantic ' \
'segmentation results are not supported yet.'
return data_samples
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Tuple[List[Tensor]]:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
tuple[List[Tensor]]: A tuple of features from ``panoptic_head``
forward.
"""
feats = self.extract_feat(batch_inputs)
results = self.panoptic_head.forward(feats, batch_data_samples)
return results
| 7,161 | 40.883041 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/fcos.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of FCOS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of FCOS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,718 | 38.976744 | 76 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/fovea.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FOVEA(SingleStageDetector):
"""Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of FOVEA. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of FOVEA. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,724 | 40.071429 | 76 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/single_stage_instance_seg.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Tuple
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList, SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .base import BaseDetector
INF = 1e8
@MODELS.register_module()
class SingleStageInstanceSegmentor(BaseDetector):
"""Base class for single-stage instance segmentors."""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
else:
self.neck = None
if bbox_head is not None:
bbox_head.update(train_cfg=copy.deepcopy(train_cfg))
bbox_head.update(test_cfg=copy.deepcopy(test_cfg))
self.bbox_head = MODELS.build(bbox_head)
else:
self.bbox_head = None
assert mask_head, f'`mask_head` must ' \
f'be implemented in {self.__class__.__name__}'
mask_head.update(train_cfg=copy.deepcopy(train_cfg))
mask_head.update(test_cfg=copy.deepcopy(test_cfg))
self.mask_head = MODELS.build(mask_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
Returns:
tuple[Tensor]: Multi-level features that may have different
resolutions.
"""
x = self.backbone(batch_inputs)
if self.with_neck:
x = self.neck(x)
return x
def _forward(self,
batch_inputs: Tensor,
batch_data_samples: OptSampleList = None,
**kwargs) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
Returns:
tuple: A tuple of features from ``bbox_head`` forward.
"""
outs = ()
# backbone
x = self.extract_feat(batch_inputs)
# bbox_head
positive_infos = None
if self.with_bbox:
assert batch_data_samples is not None
bbox_outs = self.bbox_head.forward(x)
outs = outs + (bbox_outs, )
# It is necessary to use `bbox_head.loss` to update
# `_raw_positive_infos` which will be used in `get_positive_infos`
# positive_infos will be used in the following mask head.
_ = self.bbox_head.loss(x, batch_data_samples, **kwargs)
positive_infos = self.bbox_head.get_positive_infos()
# mask_head
if positive_infos is None:
mask_outs = self.mask_head.forward(x)
else:
mask_outs = self.mask_head.forward(x, positive_infos)
outs = outs + (mask_outs, )
return outs
def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList,
**kwargs) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
losses = dict()
positive_infos = None
# CondInst and YOLACT have bbox_head
if self.with_bbox:
bbox_losses = self.bbox_head.loss(x, batch_data_samples, **kwargs)
losses.update(bbox_losses)
# get positive information from bbox head, which will be used
# in the following mask head.
positive_infos = self.bbox_head.get_positive_infos()
mask_loss = self.mask_head.loss(
x, batch_data_samples, positive_infos=positive_infos, **kwargs)
# avoid loss override
assert not set(mask_loss.keys()) & set(losses.keys())
losses.update(mask_loss)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True,
**kwargs) -> SampleList:
"""Perform forward propagation of the mask head and predict mask
results on the features of the upstream network.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
                Defaults to True.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances'. And the ``pred_instances`` usually
contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
x = self.extract_feat(batch_inputs)
if self.with_bbox:
# the bbox branch does not need to be scaled to the original
# image scale, because the mask branch will scale both bbox
# and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.bbox_head.predict(
x, batch_data_samples, rescale=bbox_rescale)
else:
results_list = None
results_list = self.mask_head.predict(
x, batch_data_samples, rescale=rescale, results_list=results_list)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
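# --- Illustrative sketch (not part of the original file) ----------------------
# How a caller might consume the ``pred_instances`` that ``predict`` attaches
# to each data sample.  ``data_samples`` is assumed to be the return value of
# ``predict``; the field names (scores, labels) follow the docstring above.
def summarize_predictions(data_samples, score_thr=0.3):
    """Collect (label, score) pairs above ``score_thr`` for each image."""
    summaries = []
    for sample in data_samples:
        inst = sample.pred_instances
        keep = inst.scores > score_thr  # bool Tensor of shape (num_instances,)
        summaries.append(
            list(zip(inst.labels[keep].tolist(), inst.scores[keep].tolist())))
    return summaries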
| 6,915 | 37.209945 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/tood.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class TOOD(SingleStageDetector):
r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of TOOD. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of TOOD. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,726 | 39.162791 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/htc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .cascade_rcnn import CascadeRCNN
@MODELS.register_module()
class HybridTaskCascade(CascadeRCNN):
"""Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
@property
def with_semantic(self) -> bool:
"""bool: whether the detector has a semantic head"""
return self.roi_head.with_semantic
| 490 | 27.882353 | 69 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .base_detr import DetectionTransformer
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .dab_detr import DABDETR
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
from .gfl_increment_erd import GFLIncrementERD # ERD
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR', 'GFLIncrementERD'
]
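# --- Illustrative sketch (not part of the original file) ----------------------
# Every class exported above is decorated with ``@MODELS.register_module()``,
# so detectors are normally instantiated from a config dict via the registry
# rather than imported directly.  The helper below is a hypothetical
# convenience wrapper, not part of mmdet's public API.
def build_detector_from_cfg(cfg):
    """Build any detector listed in ``__all__`` from its config dict."""
    from mmdet.registry import MODELS  # deferred import avoids import cycles
    return MODELS.build(cfg)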
| 2,801 | 37.383562 | 83 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/cornernet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,044 | 32.709677 | 74 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/boxinst.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class BoxInst(SingleStageInstanceSegmentor):
"""Implementation of `BoxInst <https://arxiv.org/abs/2012.02310>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
mask_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,047 | 35.137931 | 73 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/dab_detr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Tuple
from mmengine.model import uniform_init
from torch import Tensor, nn
from mmdet.registry import MODELS
from ..layers import SinePositionalEncoding
from ..layers.transformer import (DABDetrTransformerDecoder,
DABDetrTransformerEncoder, inverse_sigmoid)
from .detr import DETR
@MODELS.register_module()
class DABDETR(DETR):
r"""Implementation of `DAB-DETR:
Dynamic Anchor Boxes are Better Queries for DETR.
<https://arxiv.org/abs/2201.12329>`_.
Code is modified from the `official github repo
<https://github.com/IDEA-Research/DAB-DETR>`_.
Args:
with_random_refpoints (bool): Whether to randomly initialize query
embeddings and not update them during training.
Defaults to False.
num_patterns (int): Inspired by Anchor-DETR. Defaults to 0.
"""
def __init__(self,
*args,
with_random_refpoints: bool = False,
num_patterns: int = 0,
**kwargs) -> None:
self.with_random_refpoints = with_random_refpoints
assert isinstance(num_patterns, int), \
            f'num_patterns should be int but got {num_patterns}.'
self.num_patterns = num_patterns
super().__init__(*args, **kwargs)
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DABDetrTransformerEncoder(**self.encoder)
self.decoder = DABDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
self.query_dim = self.decoder.query_dim
self.query_embedding = nn.Embedding(self.num_queries, self.query_dim)
if self.num_patterns > 0:
self.patterns = nn.Embedding(self.num_patterns, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
f'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def init_weights(self) -> None:
"""Initialize weights for Transformer and other components."""
super(DABDETR, self).init_weights()
if self.with_random_refpoints:
uniform_init(self.query_embedding)
self.query_embedding.weight.data[:, :2] = \
inverse_sigmoid(self.query_embedding.weight.data[:, :2])
self.query_embedding.weight.data[:, :2].requires_grad = False
def pre_decoder(self, memory: Tensor) -> Tuple[Dict, Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
Returns:
tuple[dict, dict]: The first dict contains the inputs of decoder
and the second dict contains the inputs of the bbox_head function.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'query', 'query_pos',
'memory' and 'reg_branches'.
- head_inputs_dict (dict): The keyword args dictionary of the
bbox_head functions, which is usually empty, or includes
              `enc_outputs_class` and `enc_outputs_coord` when the detector
              supports 'two stage' or 'query selection' strategies.
"""
batch_size = memory.size(0)
query_pos = self.query_embedding.weight
query_pos = query_pos.unsqueeze(0).repeat(batch_size, 1, 1)
if self.num_patterns == 0:
query = query_pos.new_zeros(batch_size, self.num_queries,
self.embed_dims)
else:
query = self.patterns.weight[:, None, None, :]\
.repeat(1, self.num_queries, batch_size, 1)\
.view(-1, batch_size, self.embed_dims)\
.permute(1, 0, 2)
query_pos = query_pos.repeat(1, self.num_patterns, 1)
decoder_inputs_dict = dict(
query_pos=query_pos, query=query, memory=memory)
head_inputs_dict = dict()
return decoder_inputs_dict, head_inputs_dict
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` and `references` of the decoder output.
"""
hidden_states, references = self.decoder(
query=query,
key=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask,
reg_branches=self.bbox_head.
fc_reg # iterative refinement for anchor boxes
)
head_inputs_dict = dict(
hidden_states=hidden_states, references=references)
return head_inputs_dict
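# --- Illustrative sketch (not part of the original file) ----------------------
# Pure-tensor check of the query shapes built in ``pre_decoder`` when
# ``num_patterns > 0``.  The sizes are arbitrary; only the reshaping mirrors
# the method above.
if __name__ == '__main__':  # pragma: no cover
    import torch
    bs, num_queries, num_patterns, embed_dims, query_dim = 2, 300, 3, 256, 4
    query_pos = torch.zeros(num_queries, query_dim)[None].repeat(bs, 1, 1)
    patterns = torch.zeros(num_patterns, embed_dims)
    query = patterns[:, None, None, :] \
        .repeat(1, num_queries, bs, 1) \
        .view(-1, bs, embed_dims) \
        .permute(1, 0, 2)
    query_pos = query_pos.repeat(1, num_patterns, 1)
    assert query.shape == (bs, num_queries * num_patterns, embed_dims)
    assert query_pos.shape == (bs, num_queries * num_patterns, query_dim)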
| 5,907 | 41.2 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/lad.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, label_assignment_results,
batch_data_samples)
return losses
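# --- Illustrative sketch (not part of the original file) ----------------------
# Condensed view of the training step above: the frozen teacher only supplies
# the label assignment, and the student computes its loss with it.  ``student``
# and ``teacher`` are assumed to expose the same head API used above
# (``extract_feat``, ``bbox_head.get_label_assignment``, ``bbox_head.loss``);
# this is a sketch, not the class's actual method.
def lad_training_step(student, teacher, batch_inputs, batch_data_samples):
    import torch
    from mmdet.models.utils.misc import unpack_gt_instances
    gt_instances, gt_ignore, img_metas = unpack_gt_instances(batch_data_samples)
    with torch.no_grad():  # the teacher is never updated
        teacher_outs = teacher.bbox_head(teacher.extract_feat(batch_inputs))
        label_assignment = teacher.bbox_head.get_label_assignment(
            *teacher_outs, gt_instances, img_metas, gt_ignore)
    student_feats = student.extract_feat(batch_inputs)
    return student.bbox_head.loss(student_feats, label_assignment,
                                  batch_data_samples)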
| 3,880 | 40.287234 | 74 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/vfnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class VFNet(SingleStageDetector):
"""Implementation of `VarifocalNet
(VFNet).<https://arxiv.org/abs/2008.13367>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of VFNet. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of VFNet. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,702 | 38.604651 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/condinst.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class CondInst(SingleStageInstanceSegmentor):
"""Implementation of `CondInst <https://arxiv.org/abs/2003.05664>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
mask_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,049 | 35.206897 | 74 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/panoptic_fpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
@MODELS.register_module()
class PanopticFPN(TwoStagePanopticSegmentor):
r"""Implementation of `Panoptic feature pyramid
networks <https://arxiv.org/pdf/1901.02446>`_"""
def __init__(
self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
# for panoptic segmentation
semantic_head: OptConfigType = None,
panoptic_fusion_head: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg,
semantic_head=semantic_head,
panoptic_fusion_head=panoptic_fusion_head)
| 1,318 | 35.638889 | 67 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/solov2.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class SOLOv2(SingleStageInstanceSegmentor):
"""`SOLOv2: Dynamic and Fast Instance Segmentation
<https://arxiv.org/abs/2003.10152>`_
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,099 | 33.375 | 67 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/sparse_rcnn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class SparseRCNN(TwoStageDetector):
r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
| 1,208 | 36.78125 | 73 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/mask_scoring_rcnn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskScoringRCNN(TwoStageDetector):
"""Mask Scoring RCNN.
https://arxiv.org/abs/1903.00241
"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 997 | 30.1875 | 65 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/yolof.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOF(SingleStageDetector):
r"""Implementation of `You Only Look One-level Feature
<https://arxiv.org/abs/2103.09460>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of YOLOF. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of YOLOF. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional):
Model preprocessing config for processing the input data.
            It usually includes ``to_rgb``, ``pad_size_divisor``,
``pad_value``, ``mean`` and ``std``. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,809 | 40.136364 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/crowddet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class CrowdDet(TwoStageDetector):
"""Implementation of `CrowdDet <https://arxiv.org/abs/2003.09163>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
rpn_head (:obj:`ConfigDict` or dict): The rpn config.
roi_head (:obj:`ConfigDict` or dict): The roi config.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of CrowdDet. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of CrowdDet. Defaults to None.
neck (:obj:`ConfigDict` or dict): The neck config.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
data_preprocessor=data_preprocessor)
| 1,829 | 38.782609 | 76 |
py
|
ERD
|
ERD-main/mmdet/models/detectors/mask2former.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .maskformer import MaskFormer
@MODELS.register_module()
class Mask2Former(MaskFormer):
r"""Implementation of `Masked-attention Mask
Transformer for Universal Image Segmentation
<https://arxiv.org/pdf/2112.01527>`_."""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
panoptic_head: OptConfigType = None,
panoptic_fusion_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
panoptic_head=panoptic_head,
panoptic_fusion_head=panoptic_fusion_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
| 1,136 | 35.677419 | 65 |
py
|
ERD
|
ERD-main/mmdet/models/seg_heads/base_semantic_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptMultiConfig
@MODELS.register_module()
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
seg_rescale_factor (float): the rescale factor for ``gt_sem_seg``,
            which equals ``1 / output_strides``, where ``output_strides`` is
            the output stride of ``seg_preds``. Defaults to 1 / 4.
init_cfg (Optional[Union[:obj:`ConfigDict`, dict]]): the initialization
config.
loss_seg (Union[:obj:`ConfigDict`, dict]): the loss of the semantic
head.
"""
def __init__(self,
num_classes: int,
seg_rescale_factor: float = 1 / 4.,
loss_seg: ConfigType = dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.loss_seg = MODELS.build(loss_seg)
self.num_classes = num_classes
self.seg_rescale_factor = seg_rescale_factor
@abstractmethod
def forward(self, x: Union[Tensor, Tuple[Tensor]]) -> Dict[str, Tensor]:
"""Placeholder of forward function.
Args:
x (Tensor): Feature maps.
Returns:
Dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
@abstractmethod
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
pass
def predict(self,
x: Union[Tensor, Tuple[Tensor]],
batch_img_metas: List[dict],
rescale: bool = False) -> List[Tensor]:
"""Test without Augmentation.
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_img_metas (List[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[Tensor]: semantic segmentation logits.
"""
seg_preds = self.forward(x)['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=batch_img_metas[0]['batch_input_shape'],
mode='bilinear',
align_corners=False)
seg_preds = [seg_preds[i] for i in range(len(batch_img_metas))]
if rescale:
seg_pred_list = []
for i in range(len(batch_img_metas)):
h, w = batch_img_metas[i]['img_shape']
seg_pred = seg_preds[i][:, :h, :w]
h, w = batch_img_metas[i]['ori_shape']
seg_pred = F.interpolate(
seg_pred[None],
size=(h, w),
mode='bilinear',
align_corners=False)[0]
seg_pred_list.append(seg_pred)
else:
seg_pred_list = seg_preds
return seg_pred_list
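# --- Illustrative sketch (not part of the original file) ----------------------
# Minimal reproduction of the crop-then-rescale logic in ``predict`` for a
# single image; the class count and image sizes are arbitrary placeholders.
if __name__ == '__main__':  # pragma: no cover
    import torch
    import torch.nn.functional as F
    seg_pred = torch.randn(54, 800, 1344)  # (num_classes, padded H, padded W)
    img_shape, ori_shape = (750, 1333), (480, 853)
    seg_pred = seg_pred[:, :img_shape[0], :img_shape[1]]  # strip padding
    seg_pred = F.interpolate(
        seg_pred[None], size=ori_shape, mode='bilinear',
        align_corners=False)[0]
    assert seg_pred.shape == (54, 480, 853)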
| 3,866 | 32.921053 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/seg_heads/panoptic_fpn_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model import ModuleList
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from ..layers import ConvUpsample
from ..utils import interpolate_as
from .base_semantic_head import BaseSemanticHead
@MODELS.register_module()
class PanopticFPNHead(BaseSemanticHead):
"""PanopticFPNHead used in Panoptic FPN.
In this head, the number of output channels is ``num_stuff_classes
+ 1``, including all stuff classes and one thing class. The stuff
    classes are remapped to the range ``0`` to ``num_stuff_classes - 1``, and
    all thing classes are merged into the ``num_stuff_classes``-th channel.
    Args:
num_things_classes (int): Number of thing classes. Default: 80.
num_stuff_classes (int): Number of stuff classes. Default: 53.
in_channels (int): Number of channels in the input feature
map.
inner_channels (int): Number of channels in inner features.
start_level (int): The start level of the input features
used in PanopticFPN.
end_level (int): The end level of the used features, the
``end_level``-th layer will not be used.
conv_cfg (Optional[Union[ConfigDict, dict]]): Dictionary to construct
and config conv layer.
norm_cfg (Union[ConfigDict, dict]): Dictionary to construct and config
norm layer. Use ``GN`` by default.
init_cfg (Optional[Union[ConfigDict, dict]]): Initialization config
dict.
loss_seg (Union[ConfigDict, dict]): the loss of the semantic head.
"""
def __init__(self,
num_things_classes: int = 80,
num_stuff_classes: int = 53,
in_channels: int = 256,
inner_channels: int = 128,
start_level: int = 0,
end_level: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(
type='GN', num_groups=32, requires_grad=True),
loss_seg: ConfigType = dict(
type='CrossEntropyLoss', ignore_index=-1,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
seg_rescale_factor = 1 / 2**(start_level + 2)
super().__init__(
num_classes=num_stuff_classes + 1,
seg_rescale_factor=seg_rescale_factor,
loss_seg=loss_seg,
init_cfg=init_cfg)
self.num_things_classes = num_things_classes
self.num_stuff_classes = num_stuff_classes
# Used feature layers are [start_level, end_level)
self.start_level = start_level
self.end_level = end_level
self.num_stages = end_level - start_level
self.inner_channels = inner_channels
self.conv_upsample_layers = ModuleList()
for i in range(start_level, end_level):
self.conv_upsample_layers.append(
ConvUpsample(
in_channels,
inner_channels,
num_layers=i if i > 0 else 1,
num_upsample=i if i > 0 else 0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
))
self.conv_logits = nn.Conv2d(inner_channels, self.num_classes, 1)
def _set_things_to_void(self, gt_semantic_seg: Tensor) -> Tensor:
"""Merge thing classes to one class.
        In PanopticFPN, the background (stuff) labels are remapped to the
        range `0` to `self.num_stuff_classes - 1`, and all foreground (thing)
        labels are merged into the `self.num_stuff_classes`-th channel.
"""
gt_semantic_seg = gt_semantic_seg.int()
fg_mask = gt_semantic_seg < self.num_things_classes
bg_mask = (gt_semantic_seg >= self.num_things_classes) * (
gt_semantic_seg < self.num_things_classes + self.num_stuff_classes)
new_gt_seg = torch.clone(gt_semantic_seg)
new_gt_seg = torch.where(bg_mask,
gt_semantic_seg - self.num_things_classes,
new_gt_seg)
new_gt_seg = torch.where(fg_mask,
fg_mask.int() * self.num_stuff_classes,
new_gt_seg)
return new_gt_seg
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
seg_preds = self(x)['seg_preds']
gt_semantic_segs = [
data_sample.gt_sem_seg.sem_seg
for data_sample in batch_data_samples
]
gt_semantic_segs = torch.stack(gt_semantic_segs)
if self.seg_rescale_factor != 1.0:
gt_semantic_segs = F.interpolate(
gt_semantic_segs.float(),
scale_factor=self.seg_rescale_factor,
mode='nearest').squeeze(1)
# Things classes will be merged to one class in PanopticFPN.
gt_semantic_segs = self._set_things_to_void(gt_semantic_segs)
if seg_preds.shape[-2:] != gt_semantic_segs.shape[-2:]:
seg_preds = interpolate_as(seg_preds, gt_semantic_segs)
seg_preds = seg_preds.permute((0, 2, 3, 1))
loss_seg = self.loss_seg(
seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C]
gt_semantic_segs.reshape(-1).long())
return dict(loss_seg=loss_seg)
def init_weights(self) -> None:
"""Initialize weights."""
super().init_weights()
nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)
self.conv_logits.bias.data.zero_()
def forward(self, x: Tuple[Tensor]) -> Dict[str, Tensor]:
"""Forward.
Args:
x (Tuple[Tensor]): Multi scale Feature maps.
Returns:
dict[str, Tensor]: semantic segmentation predictions and
feature maps.
"""
        # the number of subnets must not be more than
# the length of features.
assert self.num_stages <= len(x)
feats = []
for i, layer in enumerate(self.conv_upsample_layers):
f = layer(x[self.start_level + i])
feats.append(f)
seg_feats = torch.sum(torch.stack(feats, dim=0), dim=0)
seg_preds = self.conv_logits(seg_feats)
out = dict(seg_preds=seg_preds, seg_feats=seg_feats)
return out
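# --- Illustrative sketch (not part of the original file) ----------------------
# Tiny numeric check of ``_set_things_to_void`` with 3 thing classes and
# 2 stuff classes: thing labels (0..2) collapse to 2 (= num_stuff_classes),
# stuff labels (3..4) shift to 0..1, and other values (e.g. 255) are left
# untouched.
if __name__ == '__main__':  # pragma: no cover
    import torch
    num_things, num_stuff = 3, 2
    gt = torch.tensor([0, 2, 3, 4, 255], dtype=torch.int32)
    fg = gt < num_things
    bg = (gt >= num_things) & (gt < num_things + num_stuff)
    out = torch.where(bg, gt - num_things, gt)
    out = torch.where(fg, fg.int() * num_stuff, out)
    assert out.tolist() == [2, 2, 0, 1, 255]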
| 6,975 | 38.862857 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/seg_heads/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .panoptic_fpn_head import PanopticFPNHead # noqa: F401,F403
from .panoptic_fusion_heads import * # noqa: F401,F403
| 170 | 41.75 | 65 |
py
|
ERD
|
ERD-main/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch
import torch.nn.functional as F
from mmengine.structures import InstanceData, PixelData
from torch import Tensor
from mmdet.evaluation.functional import INSTANCE_OFFSET
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.mask import mask2bbox
from mmdet.utils import OptConfigType, OptMultiConfig
from .base_panoptic_fusion_head import BasePanopticFusionHead
@MODELS.register_module()
class MaskFormerFusionHead(BasePanopticFusionHead):
"""MaskFormer fusion head which postprocesses results for panoptic
segmentation, instance segmentation and semantic segmentation."""
def __init__(self,
num_things_classes: int = 80,
num_stuff_classes: int = 53,
test_cfg: OptConfigType = None,
loss_panoptic: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs):
super().__init__(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
test_cfg=test_cfg,
loss_panoptic=loss_panoptic,
init_cfg=init_cfg,
**kwargs)
def loss(self, **kwargs):
"""MaskFormerFusionHead has no training loss."""
return dict()
def panoptic_postprocess(self, mask_cls: Tensor,
mask_pred: Tensor) -> PixelData:
"""Panoptic segmengation inference.
Args:
mask_cls (Tensor): Classfication outputs of shape
(num_queries, cls_out_channels) for a image.
Note `cls_out_channels` should includes
background.
mask_pred (Tensor): Mask outputs of shape
(num_queries, h, w) for a image.
Returns:
:obj:`PixelData`: Panoptic segment result of shape \
(h, w), each element in Tensor means: \
``segment_id = _cls + instance_id * INSTANCE_OFFSET``.
"""
object_mask_thr = self.test_cfg.get('object_mask_thr', 0.8)
iou_thr = self.test_cfg.get('iou_thr', 0.8)
filter_low_score = self.test_cfg.get('filter_low_score', False)
scores, labels = F.softmax(mask_cls, dim=-1).max(-1)
mask_pred = mask_pred.sigmoid()
keep = labels.ne(self.num_classes) & (scores > object_mask_thr)
cur_scores = scores[keep]
cur_classes = labels[keep]
cur_masks = mask_pred[keep]
cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks
h, w = cur_masks.shape[-2:]
panoptic_seg = torch.full((h, w),
self.num_classes,
dtype=torch.int32,
device=cur_masks.device)
if cur_masks.shape[0] == 0:
# We didn't detect any mask :(
pass
else:
cur_mask_ids = cur_prob_masks.argmax(0)
instance_id = 1
for k in range(cur_classes.shape[0]):
pred_class = int(cur_classes[k].item())
isthing = pred_class < self.num_things_classes
mask = cur_mask_ids == k
mask_area = mask.sum().item()
original_area = (cur_masks[k] >= 0.5).sum().item()
if filter_low_score:
mask = mask & (cur_masks[k] >= 0.5)
if mask_area > 0 and original_area > 0:
if mask_area / original_area < iou_thr:
continue
if not isthing:
# different stuff regions of same class will be
# merged here, and stuff share the instance_id 0.
panoptic_seg[mask] = pred_class
else:
panoptic_seg[mask] = (
pred_class + instance_id * INSTANCE_OFFSET)
instance_id += 1
return PixelData(sem_seg=panoptic_seg[None])
def semantic_postprocess(self, mask_cls: Tensor,
mask_pred: Tensor) -> PixelData:
"""Semantic segmengation postprocess.
Args:
mask_cls (Tensor): Classfication outputs of shape
(num_queries, cls_out_channels) for a image.
Note `cls_out_channels` should includes
background.
mask_pred (Tensor): Mask outputs of shape
(num_queries, h, w) for a image.
Returns:
:obj:`PixelData`: Semantic segment result.
"""
# TODO add semantic segmentation result
raise NotImplementedError
def instance_postprocess(self, mask_cls: Tensor,
mask_pred: Tensor) -> InstanceData:
"""Instance segmengation postprocess.
Args:
mask_cls (Tensor): Classfication outputs of shape
(num_queries, cls_out_channels) for a image.
Note `cls_out_channels` should includes
background.
mask_pred (Tensor): Mask outputs of shape
(num_queries, h, w) for a image.
Returns:
:obj:`InstanceData`: Instance segmentation results.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
max_per_image = self.test_cfg.get('max_per_image', 100)
num_queries = mask_cls.shape[0]
# shape (num_queries, num_class)
scores = F.softmax(mask_cls, dim=-1)[:, :-1]
# shape (num_queries * num_class, )
labels = torch.arange(self.num_classes, device=mask_cls.device).\
unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
scores_per_image, top_indices = scores.flatten(0, 1).topk(
max_per_image, sorted=False)
labels_per_image = labels[top_indices]
query_indices = top_indices // self.num_classes
mask_pred = mask_pred[query_indices]
# extract things
is_thing = labels_per_image < self.num_things_classes
scores_per_image = scores_per_image[is_thing]
labels_per_image = labels_per_image[is_thing]
mask_pred = mask_pred[is_thing]
mask_pred_binary = (mask_pred > 0).float()
mask_scores_per_image = (mask_pred.sigmoid() *
mask_pred_binary).flatten(1).sum(1) / (
mask_pred_binary.flatten(1).sum(1) + 1e-6)
det_scores = scores_per_image * mask_scores_per_image
mask_pred_binary = mask_pred_binary.bool()
bboxes = mask2bbox(mask_pred_binary)
results = InstanceData()
results.bboxes = bboxes
results.labels = labels_per_image
results.scores = det_scores
results.masks = mask_pred_binary
return results
def predict(self,
mask_cls_results: Tensor,
mask_pred_results: Tensor,
batch_data_samples: SampleList,
rescale: bool = False,
**kwargs) -> List[dict]:
"""Test segment without test-time aumengtation.
Only the output of last decoder layers was used.
Args:
mask_cls_results (Tensor): Mask classification logits,
shape (batch_size, num_queries, cls_out_channels).
Note `cls_out_channels` should includes background.
mask_pred_results (Tensor): Mask logits, shape
(batch_size, num_queries, h, w).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): If True, return boxes in
original image space. Default False.
Returns:
list[dict]: Instance segmentation \
results and panoptic segmentation results for each \
image.
.. code-block:: none
[
{
'pan_results': PixelData,
'ins_results': InstanceData,
# semantic segmentation results are not supported yet
'sem_results': PixelData
},
...
]
"""
batch_img_metas = [
data_sample.metainfo for data_sample in batch_data_samples
]
panoptic_on = self.test_cfg.get('panoptic_on', True)
semantic_on = self.test_cfg.get('semantic_on', False)
instance_on = self.test_cfg.get('instance_on', False)
        assert not semantic_on, 'semantic segmentation '\
'results are not supported yet.'
results = []
for mask_cls_result, mask_pred_result, meta in zip(
mask_cls_results, mask_pred_results, batch_img_metas):
# remove padding
img_height, img_width = meta['img_shape'][:2]
mask_pred_result = mask_pred_result[:, :img_height, :img_width]
if rescale:
# return result in original resolution
ori_height, ori_width = meta['ori_shape'][:2]
mask_pred_result = F.interpolate(
mask_pred_result[:, None],
size=(ori_height, ori_width),
mode='bilinear',
align_corners=False)[:, 0]
result = dict()
if panoptic_on:
pan_results = self.panoptic_postprocess(
mask_cls_result, mask_pred_result)
result['pan_results'] = pan_results
if instance_on:
ins_results = self.instance_postprocess(
mask_cls_result, mask_pred_result)
result['ins_results'] = ins_results
if semantic_on:
sem_results = self.semantic_postprocess(
mask_cls_result, mask_pred_result)
result['sem_results'] = sem_results
results.append(result)
return results
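# --- Illustrative sketch (not part of the original file) ----------------------
# How the panoptic ``segment_id`` used in ``panoptic_postprocess`` encodes a
# class and an instance: segment_id = class_id + instance_id * INSTANCE_OFFSET,
# so both parts can be recovered with modulo / integer division as long as
# class_id < INSTANCE_OFFSET (the import mirrors the one at the top of this
# file).
if __name__ == '__main__':  # pragma: no cover
    from mmdet.evaluation.functional import INSTANCE_OFFSET
    class_id, instance_id = 17, 3
    segment_id = class_id + instance_id * INSTANCE_OFFSET
    assert segment_id % INSTANCE_OFFSET == class_id
    assert segment_id // INSTANCE_OFFSET == instance_id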
| 10,522 | 38.411985 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch
from mmengine.structures import InstanceData, PixelData
from torch import Tensor
from mmdet.evaluation.functional import INSTANCE_OFFSET
from mmdet.registry import MODELS
from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig, PixelList
from .base_panoptic_fusion_head import BasePanopticFusionHead
@MODELS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes: int = 80,
num_stuff_classes: int = 53,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
super().__init__(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
test_cfg=test_cfg,
loss_panoptic=None,
init_cfg=init_cfg,
**kwargs)
def loss(self, **kwargs) -> dict:
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self,
mask_results: InstanceData,
overlap_thr: float = 0.5) -> Tensor:
"""Lay instance masks to a result map.
Args:
mask_results (:obj:`InstanceData`): Instance segmentation results,
each contains ``bboxes``, ``labels``, ``scores`` and ``masks``.
            overlap_thr (float): Threshold to determine whether two masks
                overlap. Defaults to 0.5.
        Returns:
            tuple[Tensor, Tensor]: The result map of shape (H, W) and the
                labels of the kept instances.
"""
bboxes = mask_results.bboxes
scores = mask_results.scores
labels = mask_results.labels
masks = mask_results.masks
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def _predict_single(self, mask_results: InstanceData, seg_preds: Tensor,
**kwargs) -> PixelData:
"""Fuse the results of instance and semantic segmentations.
Args:
mask_results (:obj:`InstanceData`): Instance segmentation results,
each contains ``bboxes``, ``labels``, ``scores`` and ``masks``.
seg_preds (Tensor): The semantic segmentation results,
(num_stuff + 1, H, W).
Returns:
            :obj:`PixelData`: The panoptic segmentation result of shape (H, W).
"""
id_map, labels = self._lay_masks(mask_results,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(len(mask_results)):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
pan_results = PixelData(sem_seg=pan_results[None].int())
return pan_results
def predict(self, mask_results_list: InstanceList,
seg_preds_list: List[Tensor], **kwargs) -> PixelList:
"""Predict results by fusing the results of instance and semantic
segmentations.
Args:
mask_results_list (list[:obj:`InstanceData`]): Instance
segmentation results, each contains ``bboxes``, ``labels``,
``scores`` and ``masks``.
            seg_preds_list (list[Tensor]): List of semantic segmentation
                results.
Returns:
List[PixelData]: Panoptic segmentation result.
"""
results_list = [
self._predict_single(mask_results_list[i], seg_preds_list[i])
for i in range(len(mask_results_list))
]
return results_list
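# --- Illustrative sketch (not part of the original file) ----------------------
# Minimal numeric check of the overlap heuristic in ``_lay_masks``: a lower
# scoring mask is only kept when at most ``overlap_thr`` of its area is
# already covered by a higher scoring mask laid before it.
if __name__ == '__main__':  # pragma: no cover
    import torch
    overlap_thr = 0.5
    id_map = torch.zeros(4, 4, dtype=torch.long)
    mask_a = torch.zeros(4, 4, dtype=torch.bool)
    mask_a[:2] = True  # higher score, laid first
    mask_b = torch.zeros(4, 4, dtype=torch.bool)
    mask_b[1:3] = True  # lower score, half of it overlaps mask_a
    id_map[mask_a] = 1
    pasted = id_map > 0
    intersect = (mask_b & pasted).sum().float()
    keep_b = (intersect / mask_b.sum()) <= overlap_thr
    assert bool(keep_b)  # exactly half overlaps, so mask_b is still kept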
| 5,702 | 34.64375 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_panoptic_fusion_head import \
BasePanopticFusionHead # noqa: F401,F403
from .heuristic_fusion_head import HeuristicFusionHead # noqa: F401,F403
from .maskformer_fusion_head import MaskFormerFusionHead # noqa: F401,F403
| 285 | 46.666667 | 75 |
py
|
ERD
|
ERD-main/mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
@MODELS.register_module()
class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):
"""Base class for panoptic heads."""
def __init__(self,
num_things_classes: int = 80,
num_stuff_classes: int = 53,
test_cfg: OptConfigType = None,
loss_panoptic: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
super().__init__(init_cfg=init_cfg)
self.num_things_classes = num_things_classes
self.num_stuff_classes = num_stuff_classes
self.num_classes = num_things_classes + num_stuff_classes
self.test_cfg = test_cfg
if loss_panoptic:
self.loss_panoptic = MODELS.build(loss_panoptic)
else:
self.loss_panoptic = None
@property
def with_loss(self) -> bool:
"""bool: whether the panoptic head contains loss function."""
return self.loss_panoptic is not None
@abstractmethod
def loss(self, **kwargs):
"""Loss function."""
@abstractmethod
def predict(self, **kwargs):
"""Predict function."""
| 1,369 | 30.136364 | 69 |
py
|
ERD
|
ERD-main/mmdet/models/necks/yolox_pafpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from ..layers import CSPLayer
@MODELS.register_module()
class YOLOXPAFPN(BaseModule):
"""Path Aggregation Network used in YOLOX.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3
        use_depthwise (bool): Whether to use depthwise separable convolution
            in blocks. Default: False
upsample_cfg (dict): Config dict for interpolate layer.
Default: `dict(scale_factor=2, mode='nearest')`
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN')
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish')
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
num_csp_blocks=3,
use_depthwise=False,
upsample_cfg=dict(scale_factor=2, mode='nearest'),
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish'),
init_cfg=dict(
type='Kaiming',
layer='Conv2d',
a=math.sqrt(5),
distribution='uniform',
mode='fan_in',
nonlinearity='leaky_relu')):
super(YOLOXPAFPN, self).__init__(init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
# build top-down blocks
self.upsample = nn.Upsample(**upsample_cfg)
self.reduce_layers = nn.ModuleList()
self.top_down_blocks = nn.ModuleList()
for idx in range(len(in_channels) - 1, 0, -1):
self.reduce_layers.append(
ConvModule(
in_channels[idx],
in_channels[idx - 1],
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.top_down_blocks.append(
CSPLayer(
in_channels[idx - 1] * 2,
in_channels[idx - 1],
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
# build bottom-up blocks
self.downsamples = nn.ModuleList()
self.bottom_up_blocks = nn.ModuleList()
for idx in range(len(in_channels) - 1):
self.downsamples.append(
conv(
in_channels[idx],
in_channels[idx],
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.bottom_up_blocks.append(
CSPLayer(
in_channels[idx] * 2,
in_channels[idx + 1],
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.out_convs = nn.ModuleList()
for i in range(len(in_channels)):
self.out_convs.append(
ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs):
"""
Args:
inputs (tuple[Tensor]): input features.
Returns:
tuple[Tensor]: YOLOXPAFPN features.
"""
assert len(inputs) == len(self.in_channels)
# top-down path
inner_outs = [inputs[-1]]
for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_high = inner_outs[0]
            feat_low = inputs[idx - 1]
            feat_high = self.reduce_layers[len(self.in_channels) - 1 - idx](
                feat_high)
            inner_outs[0] = feat_high
            upsample_feat = self.upsample(feat_high)
inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
torch.cat([upsample_feat, feat_low], 1))
inner_outs.insert(0, inner_out)
# bottom-up path
outs = [inner_outs[0]]
for idx in range(len(self.in_channels) - 1):
feat_low = outs[-1]
            feat_high = inner_outs[idx + 1]
            downsample_feat = self.downsamples[idx](feat_low)
            out = self.bottom_up_blocks[idx](
                torch.cat([downsample_feat, feat_high], 1))
outs.append(out)
# out convs
for idx, conv in enumerate(self.out_convs):
outs[idx] = conv(outs[idx])
return tuple(outs)
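# --- Illustrative sketch (not part of the original file) ----------------------
# Shape walk-through of the PAFPN above for a YOLOX-L style setting and a
# 640x640 input: the three levels keep their strides (8/16/32) while the
# out_convs unify the channel number.  Running it assumes mmcv/mmengine are
# installed so that ConvModule and CSPLayer can be built.
if __name__ == '__main__':  # pragma: no cover
    import torch
    neck = YOLOXPAFPN(in_channels=[256, 512, 1024], out_channels=256)
    inputs = tuple(
        torch.randn(1, c, 640 // s, 640 // s)
        for c, s in zip([256, 512, 1024], [8, 16, 32]))
    outs = neck(inputs)
    assert [tuple(o.shape) for o in outs] == [
        (1, 256, 80, 80), (1, 256, 40, 40), (1, 256, 20, 20)]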
| 5,658 | 35.044586 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/necks/ssd_neck.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class SSDNeck(BaseModule):
"""Extra layers of SSD backbone to generate multi-scale feature maps.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (Sequence[int]): Number of output channels per scale.
level_strides (Sequence[int]): Stride of 3x3 conv per level.
level_paddings (Sequence[int]): Padding size of 3x3 conv per level.
l2_norm_scale (float|None): L2 normalization layer init scale.
            If None, L2 normalization is not used on the first input feature.
last_kernel_size (int): Kernel size of the last conv layer.
Default: 3.
use_depthwise (bool): Whether to use DepthwiseSeparableConv.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
level_strides,
level_paddings,
l2_norm_scale=20.,
last_kernel_size=3,
use_depthwise=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
init_cfg=[
dict(
type='Xavier', distribution='uniform',
layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
]):
super(SSDNeck, self).__init__(init_cfg)
assert len(out_channels) > len(in_channels)
assert len(out_channels) - len(in_channels) == len(level_strides)
assert len(level_strides) == len(level_paddings)
assert in_channels == out_channels[:len(in_channels)]
if l2_norm_scale:
self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)
self.init_cfg += [
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
self.extra_layers = nn.ModuleList()
extra_layer_channels = out_channels[len(in_channels):]
second_conv = DepthwiseSeparableConvModule if \
use_depthwise else ConvModule
for i, (out_channel, stride, padding) in enumerate(
zip(extra_layer_channels, level_strides, level_paddings)):
kernel_size = last_kernel_size \
if i == len(extra_layer_channels) - 1 else 3
per_lvl_convs = nn.Sequential(
ConvModule(
out_channels[len(in_channels) - 1 + i],
out_channel // 2,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
second_conv(
out_channel // 2,
out_channel,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.extra_layers.append(per_lvl_convs)
def forward(self, inputs):
"""Forward function."""
outs = [feat for feat in inputs]
if hasattr(self, 'l2_norm'):
outs[0] = self.l2_norm(outs[0])
feat = outs[-1]
for layer in self.extra_layers:
feat = layer(feat)
outs.append(feat)
return tuple(outs)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
scale (float, optional): Defaults to 20..
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
# normalization layer convert to FP32 in FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
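if __name__ == '__main__':
    import torch
    # Minimal usage sketch; the channel/stride values are toy assumptions for
    # illustration, not the SSD300 defaults. `init_weights()` also fills the
    # L2Norm weight with `l2_norm_scale` through the init_cfg override above.
    neck = SSDNeck(
        in_channels=(8, 16),
        out_channels=(8, 16, 32, 64),
        level_strides=(2, 2),
        level_paddings=(1, 1))
    neck.init_weights()
    feats = (torch.rand(1, 8, 20, 20), torch.rand(1, 16, 10, 10))
    for out in neck(feats):
        print(tuple(out.shape))
    # expected: (1, 8, 20, 20), (1, 16, 10, 10), (1, 32, 5, 5), (1, 64, 3, 3)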
| 4,901 | 36.707692 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/necks/rfp.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model import BaseModule, ModuleList, constant_init, xavier_init
from mmdet.registry import MODELS
from .fpn import FPN
class ASPP(BaseModule):
"""ASPP (Atrous Spatial Pyramid Pooling)
This is an implementation of the ASPP module used in DetectoRS
(https://arxiv.org/pdf/2006.02334.pdf)
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of channels produced by this module
dilations (tuple[int]): Dilations of the four branches.
Default: (1, 3, 6, 1)
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
dilations=(1, 3, 6, 1),
init_cfg=dict(type='Kaiming', layer='Conv2d')):
super().__init__(init_cfg)
assert dilations[-1] == 1
self.aspp = nn.ModuleList()
for dilation in dilations:
kernel_size = 3 if dilation > 1 else 1
padding = dilation if dilation > 1 else 0
conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
dilation=dilation,
padding=padding,
bias=True)
self.aspp.append(conv)
self.gap = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
avg_x = self.gap(x)
out = []
for aspp_idx in range(len(self.aspp)):
inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x
out.append(F.relu_(self.aspp[aspp_idx](inp)))
out[-1] = out[-1].expand_as(out[-2])
out = torch.cat(out, dim=1)
return out
@MODELS.register_module()
class RFP(FPN):
"""RFP (Recursive Feature Pyramid)
This is an implementation of RFP in `DetectoRS
<https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the
input of RFP should be multi level features along with origin input image
of backbone.
Args:
rfp_steps (int): Number of unrolled steps of RFP.
rfp_backbone (dict): Configuration of the backbone for RFP.
aspp_out_channels (int): Number of output channels of ASPP module.
aspp_dilations (tuple[int]): Dilation rates of four branches.
Default: (1, 3, 6, 1)
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
rfp_steps,
rfp_backbone,
aspp_out_channels,
aspp_dilations=(1, 3, 6, 1),
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super().__init__(init_cfg=init_cfg, **kwargs)
self.rfp_steps = rfp_steps
        # Be careful! Pretrained weights cannot be loaded when using
        # nn.ModuleList
self.rfp_modules = ModuleList()
for rfp_idx in range(1, rfp_steps):
rfp_module = MODELS.build(rfp_backbone)
self.rfp_modules.append(rfp_module)
self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,
aspp_dilations)
self.rfp_weight = nn.Conv2d(
self.out_channels,
1,
kernel_size=1,
stride=1,
padding=0,
bias=True)
def init_weights(self):
# Avoid using super().init_weights(), which may alter the default
# initialization of the modules in self.rfp_modules that have missing
# keys in the pretrained checkpoint.
for convs in [self.lateral_convs, self.fpn_convs]:
for m in convs.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
for rfp_idx in range(self.rfp_steps - 1):
self.rfp_modules[rfp_idx].init_weights()
constant_init(self.rfp_weight, 0)
def forward(self, inputs):
inputs = list(inputs)
assert len(inputs) == len(self.in_channels) + 1 # +1 for input image
img = inputs.pop(0)
# FPN forward
x = super().forward(tuple(inputs))
for rfp_idx in range(self.rfp_steps - 1):
rfp_feats = [x[0]] + list(
self.rfp_aspp(x[i]) for i in range(1, len(x)))
x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
# FPN forward
x_idx = super().forward(x_idx)
x_new = []
for ft_idx in range(len(x_idx)):
add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
x_new.append(add_weight * x_idx[ft_idx] +
(1 - add_weight) * x[ft_idx])
x = x_new
return x
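if __name__ == '__main__':
    import torch
    # ASPP sketch with toy sizes: the four parallel branches (the last one is
    # built from global average pooling) are concatenated, so the output has
    # 4 * out_channels channels at the input resolution.
    aspp = ASPP(in_channels=8, out_channels=2)
    x = torch.rand(1, 8, 16, 16)
    print(tuple(aspp(x).shape))  # (1, 8, 16, 16)
    # A runnable RFP demo additionally needs a backbone registered in MODELS
    # that implements `rfp_forward(img, rfp_feats)` (as used in RFP.forward
    # above), e.g. the DetectoRS ResNet variant, so only an untested outline
    # of the neck config is sketched here:
    # neck = dict(
    #     type='RFP',
    #     rfp_steps=2,
    #     aspp_out_channels=64,
    #     aspp_dilations=(1, 3, 6, 1),
    #     rfp_backbone=dict(...),  # backbone config exposing rfp_forward
    #     # plus the usual FPN kwargs: in_channels, out_channels, num_outs, ...
    # )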
| 5,024 | 36.222222 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/necks/dilated_encoder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, is_norm
from mmengine.model import caffe2_xavier_init, constant_init, normal_init
from torch.nn import BatchNorm2d
from mmdet.registry import MODELS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@MODELS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
block_mid_channels (int): The number of middle block output channels
num_residual_blocks (int): The number of residual blocks.
block_dilations (list): The list of residual blocks dilation.
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks, block_dilations):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = block_dilations
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
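if __name__ == '__main__':
    import torch
    # Minimal sketch with toy sizes (not the YOLOF defaults). The encoder
    # consumes the last feature map of the input tuple and keeps its
    # resolution while projecting it to `out_channels`.
    neck = DilatedEncoder(
        in_channels=16,
        out_channels=8,
        block_mid_channels=4,
        num_residual_blocks=2,
        block_dilations=[2, 4])
    neck.init_weights()
    neck.eval()
    out = neck((torch.rand(1, 16, 32, 32), ))[0]
    print(tuple(out.shape))  # (1, 8, 32, 32)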
| 3,967 | 35.072727 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/necks/fpg.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
class Transition(BaseModule):
"""Base class for transition.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
"""
def __init__(self, in_channels, out_channels, init_cfg=None):
super().__init__(init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
    def forward(self, x):
        pass
class UpInterpolationConv(Transition):
"""A transition used for up-sampling.
    Up-samples the input by interpolation and then refines the feature with
    a convolution layer.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
scale_factor (int): Up-sampling factor. Default: 2.
        mode (str): Interpolation mode. Default: 'nearest'.
        align_corners (bool): Whether to align corners when interpolating.
Default: None.
kernel_size (int): Kernel size for the conv. Default: 3.
"""
def __init__(self,
in_channels,
out_channels,
scale_factor=2,
mode='nearest',
align_corners=None,
kernel_size=3,
init_cfg=None,
**kwargs):
super().__init__(in_channels, out_channels, init_cfg)
self.mode = mode
self.scale_factor = scale_factor
self.align_corners = align_corners
self.conv = ConvModule(
in_channels,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
**kwargs)
def forward(self, x):
x = F.interpolate(
x,
scale_factor=self.scale_factor,
mode=self.mode,
align_corners=self.align_corners)
x = self.conv(x)
return x
class LastConv(Transition):
"""A transition used for refining the output of the last stage.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
num_inputs (int): Number of inputs of the FPN features.
kernel_size (int): Kernel size for the conv. Default: 3.
"""
def __init__(self,
in_channels,
out_channels,
num_inputs,
kernel_size=3,
init_cfg=None,
**kwargs):
super().__init__(in_channels, out_channels, init_cfg)
self.num_inputs = num_inputs
self.conv_out = ConvModule(
in_channels,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
**kwargs)
def forward(self, inputs):
assert len(inputs) == self.num_inputs
return self.conv_out(inputs[-1])
@MODELS.register_module()
class FPG(BaseModule):
"""FPG.
Implementation of `Feature Pyramid Grids (FPG)
<https://arxiv.org/abs/2004.03580>`_.
This implementation only gives the basic structure stated in the paper.
    Users can implement different types of transitions to fully explore the
    potential power of the FPG structure.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
stack_times (int): The number of times the pyramid architecture will
be stacked.
paths (list[str]): Specify the path order of each stack level.
Each element in the list should be either 'bu' (bottom-up) or
'td' (top-down).
inter_channels (int): Number of inter channels.
        same_up_trans (dict): Transition from a level to the next coarser
            level within the same stage (used on bottom-up paths).
        same_down_trans (dict): Transition from a level to the next finer
            level within the same stage (used on top-down paths).
        across_lateral_trans (dict): Across-pathway same-stage transition.
across_down_trans (dict): Across-pathway bottom-up connection.
across_up_trans (dict): Across-pathway top-down connection.
across_skip_trans (dict): Across-pathway skip connection.
output_trans (dict): Transition that trans the output of the
last stage.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
add_extra_convs (bool): It decides whether to add conv
layers on top of the original feature maps. Default to False.
            If True, the extra levels are built with stride-2 convs instead of
            max pooling.
norm_cfg (dict): Config dict for normalization layer. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
transition_types = {
'conv': ConvModule,
'interpolation_conv': UpInterpolationConv,
'last_conv': LastConv,
}
def __init__(self,
in_channels,
out_channels,
num_outs,
stack_times,
paths,
inter_channels=None,
same_down_trans=None,
same_up_trans=dict(
type='conv', kernel_size=3, stride=2, padding=1),
across_lateral_trans=dict(type='conv', kernel_size=1),
across_down_trans=dict(type='conv', kernel_size=3),
across_up_trans=None,
across_skip_trans=dict(type='identity'),
output_trans=dict(type='last_conv', kernel_size=3),
start_level=0,
end_level=-1,
add_extra_convs=False,
norm_cfg=None,
skip_inds=None,
init_cfg=[
dict(type='Caffe2Xavier', layer='Conv2d'),
dict(
type='Constant',
layer=[
'_BatchNorm', '_InstanceNorm', 'GroupNorm',
'LayerNorm'
],
val=1.0)
]):
super(FPG, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
if inter_channels is None:
self.inter_channels = [out_channels for _ in range(num_outs)]
elif isinstance(inter_channels, int):
self.inter_channels = [inter_channels for _ in range(num_outs)]
else:
assert isinstance(inter_channels, list)
assert len(inter_channels) == num_outs
self.inter_channels = inter_channels
self.stack_times = stack_times
self.paths = paths
assert isinstance(paths, list) and len(paths) == stack_times
for d in paths:
assert d in ('bu', 'td')
self.same_down_trans = same_down_trans
self.same_up_trans = same_up_trans
self.across_lateral_trans = across_lateral_trans
self.across_down_trans = across_down_trans
self.across_up_trans = across_up_trans
self.output_trans = output_trans
self.across_skip_trans = across_skip_trans
self.with_bias = norm_cfg is None
# skip inds must be specified if across skip trans is not None
        if self.across_skip_trans is not None:
            assert skip_inds is not None
self.skip_inds = skip_inds
assert len(self.skip_inds[0]) <= self.stack_times
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
# build lateral 1x1 convs to reduce channels
self.lateral_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = nn.Conv2d(self.in_channels[i],
self.inter_channels[i - self.start_level], 1)
self.lateral_convs.append(l_conv)
extra_levels = num_outs - self.backbone_end_level + self.start_level
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
if self.add_extra_convs:
fpn_idx = self.backbone_end_level - self.start_level + i
extra_conv = nn.Conv2d(
self.inter_channels[fpn_idx - 1],
self.inter_channels[fpn_idx],
3,
stride=2,
padding=1)
self.extra_downsamples.append(extra_conv)
else:
self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))
self.fpn_transitions = nn.ModuleList() # stack times
for s in range(self.stack_times):
stage_trans = nn.ModuleList() # num of feature levels
for i in range(self.num_outs):
# same, across_lateral, across_down, across_up
trans = nn.ModuleDict()
if s in self.skip_inds[i]:
stage_trans.append(trans)
continue
# build same-stage down trans (used in bottom-up paths)
if i == 0 or self.same_up_trans is None:
same_up_trans = None
else:
same_up_trans = self.build_trans(
self.same_up_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['same_up'] = same_up_trans
# build same-stage up trans (used in top-down paths)
if i == self.num_outs - 1 or self.same_down_trans is None:
same_down_trans = None
else:
same_down_trans = self.build_trans(
self.same_down_trans, self.inter_channels[i + 1],
self.inter_channels[i])
trans['same_down'] = same_down_trans
# build across lateral trans
across_lateral_trans = self.build_trans(
self.across_lateral_trans, self.inter_channels[i],
self.inter_channels[i])
trans['across_lateral'] = across_lateral_trans
# build across down trans
if i == self.num_outs - 1 or self.across_down_trans is None:
across_down_trans = None
else:
across_down_trans = self.build_trans(
self.across_down_trans, self.inter_channels[i + 1],
self.inter_channels[i])
trans['across_down'] = across_down_trans
# build across up trans
if i == 0 or self.across_up_trans is None:
across_up_trans = None
else:
across_up_trans = self.build_trans(
self.across_up_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['across_up'] = across_up_trans
if self.across_skip_trans is None:
across_skip_trans = None
else:
across_skip_trans = self.build_trans(
self.across_skip_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['across_skip'] = across_skip_trans
# build across_skip trans
stage_trans.append(trans)
self.fpn_transitions.append(stage_trans)
self.output_transition = nn.ModuleList() # output levels
for i in range(self.num_outs):
trans = self.build_trans(
self.output_trans,
self.inter_channels[i],
self.out_channels,
num_inputs=self.stack_times + 1)
self.output_transition.append(trans)
self.relu = nn.ReLU(inplace=True)
def build_trans(self, cfg, in_channels, out_channels, **extra_args):
cfg_ = cfg.copy()
trans_type = cfg_.pop('type')
trans_cls = self.transition_types[trans_type]
return trans_cls(in_channels, out_channels, **cfg_, **extra_args)
def fuse(self, fuse_dict):
out = None
for item in fuse_dict.values():
if item is not None:
if out is None:
out = item
else:
out = out + item
return out
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build all levels from original feature maps
feats = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
for downsample in self.extra_downsamples:
feats.append(downsample(feats[-1]))
outs = [feats]
for i in range(self.stack_times):
current_outs = outs[-1]
next_outs = []
direction = self.paths[i]
for j in range(self.num_outs):
if i in self.skip_inds[j]:
next_outs.append(outs[-1][j])
continue
# feature level
if direction == 'td':
lvl = self.num_outs - j - 1
else:
lvl = j
# get transitions
if direction == 'td':
same_trans = self.fpn_transitions[i][lvl]['same_down']
else:
same_trans = self.fpn_transitions[i][lvl]['same_up']
across_lateral_trans = self.fpn_transitions[i][lvl][
'across_lateral']
across_down_trans = self.fpn_transitions[i][lvl]['across_down']
across_up_trans = self.fpn_transitions[i][lvl]['across_up']
across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']
# init output
to_fuse = dict(
same=None, lateral=None, across_up=None, across_down=None)
# same downsample/upsample
if same_trans is not None:
to_fuse['same'] = same_trans(next_outs[-1])
# across lateral
if across_lateral_trans is not None:
to_fuse['lateral'] = across_lateral_trans(
current_outs[lvl])
# across downsample
if lvl > 0 and across_up_trans is not None:
to_fuse['across_up'] = across_up_trans(current_outs[lvl -
1])
# across upsample
if (lvl < self.num_outs - 1 and across_down_trans is not None):
to_fuse['across_down'] = across_down_trans(
current_outs[lvl + 1])
if across_skip_trans is not None:
to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
x = self.fuse(to_fuse)
next_outs.append(x)
if direction == 'td':
outs.append(next_outs[::-1])
else:
outs.append(next_outs)
# output trans
final_outs = []
for i in range(self.num_outs):
lvl_out_list = []
for s in range(len(outs)):
lvl_out_list.append(outs[s][i])
lvl_out = self.output_transition[i](lvl_out_list)
final_outs.append(lvl_out)
return final_outs
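if __name__ == '__main__':
    import torch
    # The full FPG neck needs a carefully matched set of transition configs
    # (see the model zoo configs), so only one building block is demonstrated
    # here with toy sizes: `UpInterpolationConv` upsamples by 2 and then
    # refines with a 3x3 conv.
    trans = UpInterpolationConv(in_channels=4, out_channels=8, kernel_size=3)
    x = torch.rand(1, 4, 8, 8)
    print(tuple(trans(x).shape))  # (1, 8, 16, 16)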
| 16,397 | 39.289926 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/necks/pafpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmdet.registry import MODELS
from .fpn import FPN
@MODELS.register_module()
class PAFPN(FPN):
"""Path Aggregation Network for Instance Segmentation.
This is an implementation of the `PAFPN in Path Aggregation Network
<https://arxiv.org/abs/1803.01534>`_.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
add_extra_convs (bool | str): If bool, it decides whether to add conv
layers on top of the original feature maps. Default to False.
If True, it is equivalent to `add_extra_convs='on_input'`.
If str, it specifies the source feature map of the extra convs.
Only the following options are allowed
- 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- 'on_lateral': Last feature map after lateral convs.
- 'on_output': The last output feature map after fpn convs.
relu_before_extra_convs (bool): Whether to apply relu before the extra
conv. Default: False.
no_norm_on_lateral (bool): Whether to apply norm on lateral.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (str): Config dict for activation layer in ConvModule.
Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(PAFPN, self).__init__(
in_channels,
out_channels,
num_outs,
start_level,
end_level,
add_extra_convs,
relu_before_extra_convs,
no_norm_on_lateral,
conv_cfg,
norm_cfg,
act_cfg,
init_cfg=init_cfg)
# add extra bottom up pathway
self.downsample_convs = nn.ModuleList()
self.pafpn_convs = nn.ModuleList()
for i in range(self.start_level + 1, self.backbone_end_level):
d_conv = ConvModule(
out_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
pafpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.downsample_convs.append(d_conv)
self.pafpn_convs.append(pafpn_conv)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + F.interpolate(
laterals[i], size=prev_shape, mode='nearest')
# build outputs
# part 1: from original levels
inter_outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add bottom-up path
for i in range(0, used_backbone_levels - 1):
inter_outs[i + 1] = inter_outs[i + 1] + \
self.downsample_convs[i](inter_outs[i])
outs = []
outs.append(inter_outs[0])
outs.extend([
self.pafpn_convs[i - 1](inter_outs[i])
for i in range(1, used_backbone_levels)
])
# part 3: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.add_extra_convs == 'on_input':
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[used_backbone_levels](orig))
elif self.add_extra_convs == 'on_lateral':
outs.append(self.fpn_convs[used_backbone_levels](
laterals[-1]))
elif self.add_extra_convs == 'on_output':
outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
else:
raise NotImplementedError
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
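if __name__ == '__main__':
    import torch
    # Minimal sketch with toy channels. The input resolutions are powers of
    # two, chosen so the stride-2 convs of the bottom-up path produce sizes
    # that match the next level (PAFPN adds them elementwise).
    in_channels = [2, 3, 5, 7]
    scales = [64, 32, 16, 8]
    feats = [torch.rand(1, c, s, s) for c, s in zip(in_channels, scales)]
    neck = PAFPN(in_channels, out_channels=11, num_outs=4).eval()
    for out in neck(feats):
        print(tuple(out.shape))
    # expected: 11 channels at 64, 32, 16 and 8 resolution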
| 6,277 | 38.734177 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/necks/nasfcos_fpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.ops.merge_cells import ConcatCell
from mmengine.model import BaseModule, caffe2_xavier_init
from mmdet.registry import MODELS
@MODELS.register_module()
class NASFCOS_FPN(BaseModule):
"""FPN structure in NASFPN.
Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for
Object Detection <https://arxiv.org/abs/1906.04423>`_
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
add_extra_convs (bool): It decides whether to add conv
layers on top of the original feature maps. Default to False.
If True, its actual mode is specified by `extra_convs_on_inputs`.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=1,
end_level=-1,
add_extra_convs=False,
conv_cfg=None,
norm_cfg=None,
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(NASFCOS_FPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.adapt_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
adapt_conv = ConvModule(
in_channels[i],
out_channels,
1,
stride=1,
padding=0,
bias=False,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU', inplace=False))
self.adapt_convs.append(adapt_conv)
# C2 is omitted according to the paper
extra_levels = num_outs - self.backbone_end_level + self.start_level
def build_concat_cell(with_input1_conv, with_input2_conv):
cell_conv_cfg = dict(
kernel_size=1, padding=0, bias=False, groups=out_channels)
return ConcatCell(
in_channels=out_channels,
out_channels=out_channels,
with_out_conv=True,
out_conv_cfg=cell_conv_cfg,
out_norm_cfg=dict(type='BN'),
out_conv_order=('norm', 'act', 'conv'),
with_input1_conv=with_input1_conv,
with_input2_conv=with_input2_conv,
input_conv_cfg=conv_cfg,
input_norm_cfg=norm_cfg,
upsample_mode='nearest')
        # Denote c3=f0, c4=f1, c5=f2 for convenience
self.fpn = nn.ModuleDict()
self.fpn['c22_1'] = build_concat_cell(True, True)
self.fpn['c22_2'] = build_concat_cell(True, True)
self.fpn['c32'] = build_concat_cell(True, False)
self.fpn['c02'] = build_concat_cell(True, False)
self.fpn['c42'] = build_concat_cell(True, True)
self.fpn['c36'] = build_concat_cell(True, True)
self.fpn['c61'] = build_concat_cell(True, True) # f9
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
extra_act_cfg = None if i == 0 \
else dict(type='ReLU', inplace=False)
self.extra_downsamples.append(
ConvModule(
out_channels,
out_channels,
3,
stride=2,
padding=1,
act_cfg=extra_act_cfg,
order=('act', 'norm', 'conv')))
def forward(self, inputs):
"""Forward function."""
feats = [
adapt_conv(inputs[i + self.start_level])
for i, adapt_conv in enumerate(self.adapt_convs)
]
for (i, module_name) in enumerate(self.fpn):
idx_1, idx_2 = int(module_name[1]), int(module_name[2])
res = self.fpn[module_name](feats[idx_1], feats[idx_2])
feats.append(res)
ret = []
for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]): # add P3, P4, P5
feats1, feats2 = feats[idx], feats[5]
feats2_resize = F.interpolate(
feats2,
size=feats1.size()[2:],
mode='bilinear',
align_corners=False)
feats_sum = feats1 + feats2_resize
ret.append(
F.interpolate(
feats_sum,
size=inputs[input_idx].size()[2:],
mode='bilinear',
align_corners=False))
for submodule in self.extra_downsamples:
ret.append(submodule(ret[-1]))
return tuple(ret)
def init_weights(self):
"""Initialize the weights of module."""
super(NASFCOS_FPN, self).init_weights()
for module in self.fpn.values():
            if hasattr(module, 'out_conv'):
                caffe2_xavier_init(module.out_conv.conv)
for modules in [
self.adapt_convs.modules(),
self.extra_downsamples.modules()
]:
for module in modules:
if isinstance(module, nn.Conv2d):
caffe2_xavier_init(module)
| 6,713 | 38.263158 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/necks/fpn_carafe.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmengine.model import BaseModule, ModuleList, xavier_init
from mmdet.registry import MODELS
@MODELS.register_module()
class FPN_CARAFE(BaseModule):
"""FPN_CARAFE is a more flexible implementation of FPN. It allows more
    choices of upsample method during the top-down pathway.
It can reproduce the performance of ICCV 2019 paper
CARAFE: Content-Aware ReAssembly of FEatures
Please refer to https://arxiv.org/abs/1905.02188 for more details.
Args:
in_channels (list[int]): Number of channels for each input feature map.
out_channels (int): Output channels of feature pyramids.
num_outs (int): Number of output stages.
start_level (int): Start level of feature pyramids.
(Default: 0)
end_level (int): End level of feature pyramids.
(Default: -1 indicates the last level).
norm_cfg (dict): Dictionary to construct and config norm layer.
activate (str): Type of activation function in ConvModule
(Default: None indicates w/o activation).
order (dict): Order of components in ConvModule.
upsample (str): Type of upsample layer.
upsample_cfg (dict): Dictionary to construct and config upsample layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(FPN_CARAFE, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.with_bias = norm_cfg is None
self.upsample_cfg = upsample_cfg.copy()
self.upsample = self.upsample_cfg.get('type')
self.relu = nn.ReLU(inplace=False)
self.order = order
assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]
assert self.upsample in [
'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None
]
if self.upsample in ['deconv', 'pixel_shuffle']:
assert hasattr(
self.upsample_cfg,
'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0
self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.lateral_convs = ModuleList()
self.fpn_convs = ModuleList()
self.upsample_modules = ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
norm_cfg=norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
if i != self.backbone_end_level - 1:
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample == 'deconv':
upsample_cfg_.update(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.upsample_kernel,
stride=2,
padding=(self.upsample_kernel - 1) // 2,
output_padding=(self.upsample_kernel - 1) // 2)
elif self.upsample == 'pixel_shuffle':
upsample_cfg_.update(
in_channels=out_channels,
out_channels=out_channels,
scale_factor=2,
upsample_kernel=self.upsample_kernel)
elif self.upsample == 'carafe':
upsample_cfg_.update(channels=out_channels, scale_factor=2)
else:
# suppress warnings
align_corners = (None
if self.upsample == 'nearest' else False)
upsample_cfg_.update(
scale_factor=2,
mode=self.upsample,
align_corners=align_corners)
upsample_module = build_upsample_layer(upsample_cfg_)
self.upsample_modules.append(upsample_module)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_out_levels = (
num_outs - self.backbone_end_level + self.start_level)
if extra_out_levels >= 1:
for i in range(extra_out_levels):
in_channels = (
self.in_channels[self.backbone_end_level -
1] if i == 0 else out_channels)
extra_l_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
if self.upsample == 'deconv':
upsampler_cfg_ = dict(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.upsample_kernel,
stride=2,
padding=(self.upsample_kernel - 1) // 2,
output_padding=(self.upsample_kernel - 1) // 2)
elif self.upsample == 'pixel_shuffle':
upsampler_cfg_ = dict(
in_channels=out_channels,
out_channels=out_channels,
scale_factor=2,
upsample_kernel=self.upsample_kernel)
elif self.upsample == 'carafe':
upsampler_cfg_ = dict(
channels=out_channels,
scale_factor=2,
**self.upsample_cfg)
else:
# suppress warnings
align_corners = (None
if self.upsample == 'nearest' else False)
upsampler_cfg_ = dict(
scale_factor=2,
mode=self.upsample,
align_corners=align_corners)
upsampler_cfg_['type'] = self.upsample
upsample_module = build_upsample_layer(upsampler_cfg_)
extra_fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
self.upsample_modules.append(upsample_module)
self.fpn_convs.append(extra_fpn_conv)
self.lateral_convs.append(extra_l_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
"""Initialize the weights of module."""
super(FPN_CARAFE, self).init_weights()
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
xavier_init(m, distribution='uniform')
for m in self.modules():
if isinstance(m, CARAFEPack):
m.init_weights()
def slice_as(self, src, dst):
"""Slice ``src`` as ``dst``
Note:
``src`` should have the same or larger size than ``dst``.
Args:
src (torch.Tensor): Tensors to be sliced.
dst (torch.Tensor): ``src`` will be sliced to have the same
size as ``dst``.
Returns:
torch.Tensor: Sliced tensor.
"""
assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))
if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):
return src
else:
return src[:, :, :dst.size(2), :dst.size(3)]
def tensor_add(self, a, b):
"""Add tensors ``a`` and ``b`` that might have different sizes."""
if a.size() == b.size():
c = a + b
else:
c = a + self.slice_as(b, a)
return c
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = []
for i, lateral_conv in enumerate(self.lateral_convs):
if i <= self.backbone_end_level - self.start_level:
input = inputs[min(i + self.start_level, len(inputs) - 1)]
else:
input = laterals[-1]
lateral = lateral_conv(input)
laterals.append(lateral)
# build top-down path
for i in range(len(laterals) - 1, 0, -1):
if self.upsample is not None:
upsample_feat = self.upsample_modules[i - 1](laterals[i])
else:
upsample_feat = laterals[i]
laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)
# build outputs
num_conv_outs = len(self.fpn_convs)
outs = []
for i in range(num_conv_outs):
out = self.fpn_convs[i](laterals[i])
outs.append(out)
return tuple(outs)
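if __name__ == '__main__':
    import torch
    # CPU-runnable sketch with toy sizes. To avoid requiring the compiled
    # CARAFE op it swaps the default upsampler for nearest interpolation;
    # the default `upsample_cfg` (type='carafe') is the intended setting.
    neck = FPN_CARAFE(
        in_channels=[4, 8],
        out_channels=6,
        num_outs=4,
        upsample_cfg=dict(type='nearest'))
    neck.init_weights()
    feats = (torch.rand(1, 4, 16, 16), torch.rand(1, 8, 8, 8))
    for out in neck(feats):
        print(tuple(out.shape))
    # expected: (1, 6, 16, 16), (1, 6, 8, 8), (1, 6, 4, 4), (1, 6, 2, 2)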
| 11,159 | 39.434783 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/necks/ct_resnet_neck.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from mmdet.utils import OptMultiConfig
@MODELS.register_module()
class CTResNetNeck(BaseModule):
"""The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for
object classification and box regression.
Args:
in_channels (int): Number of input channels.
num_deconv_filters (tuple[int]): Number of filters per stage.
        num_deconv_kernels (tuple[int]): Kernel sizes of the deconv layers,
            one per stage.
use_dcn (bool): If True, use DCNv2. Defaults to True.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization
config dict.
"""
def __init__(self,
in_channels: int,
num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...],
use_dcn: bool = True,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
assert len(num_deconv_filters) == len(num_deconv_kernels)
self.fp16_enabled = False
self.use_dcn = use_dcn
self.in_channels = in_channels
self.deconv_layers = self._make_deconv_layer(num_deconv_filters,
num_deconv_kernels)
def _make_deconv_layer(
self, num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...]) -> nn.Sequential:
"""use deconv layers to upsample backbone's output."""
layers = []
for i in range(len(num_deconv_filters)):
feat_channels = num_deconv_filters[i]
conv_module = ConvModule(
self.in_channels,
feat_channels,
3,
padding=1,
conv_cfg=dict(type='DCNv2') if self.use_dcn else None,
norm_cfg=dict(type='BN'))
layers.append(conv_module)
upsample_module = ConvModule(
feat_channels,
feat_channels,
num_deconv_kernels[i],
stride=2,
padding=1,
conv_cfg=dict(type='deconv'),
norm_cfg=dict(type='BN'))
layers.append(upsample_module)
self.in_channels = feat_channels
return nn.Sequential(*layers)
def init_weights(self) -> None:
"""Initialize the parameters."""
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
# In order to be consistent with the source code,
# reset the ConvTranspose2d initialization parameters
m.reset_parameters()
# Simulated bilinear upsampling kernel
w = m.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (
1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# self.use_dcn is False
elif not self.use_dcn and isinstance(m, nn.Conv2d):
# In order to be consistent with the source code,
# reset the Conv2d initialization parameters
m.reset_parameters()
def forward(self, x: Sequence[torch.Tensor]) -> Tuple[torch.Tensor]:
"""model forward."""
assert isinstance(x, (list, tuple))
outs = self.deconv_layers(x[-1])
return outs,
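if __name__ == '__main__':
    import torch
    # Minimal sketch with toy sizes. `use_dcn=False` keeps it runnable without
    # the compiled DCNv2 op; each (conv + 4x4 deconv, stride 2) stage doubles
    # the spatial size of the last input feature map.
    neck = CTResNetNeck(
        in_channels=16,
        num_deconv_filters=(8, 4),
        num_deconv_kernels=(4, 4),
        use_dcn=False).eval()
    out = neck((torch.rand(1, 16, 4, 4), ))[0]
    print(tuple(out.shape))  # (1, 4, 16, 16)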
| 4,038 | 38.213592 | 75 |
py
|
ERD
|
ERD-main/mmdet/models/necks/ssh.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
class SSHContextModule(BaseModule):
"""This is an implementation of `SSH context module` described in `SSH:
Single Stage Headless Face Detector.
<https://arxiv.org/pdf/1708.03979.pdf>`_.
Args:
in_channels (int): Number of input channels used at each scale.
out_channels (int): Number of output channels used at each scale.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
layer. Defaults to dict(type='BN').
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
in_channels: int,
out_channels: int,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
init_cfg: OptMultiConfig = None):
super().__init__(init_cfg=init_cfg)
assert out_channels % 4 == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.conv5x5_1 = ConvModule(
self.in_channels,
self.out_channels // 4,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
)
self.conv5x5_2 = ConvModule(
self.out_channels // 4,
self.out_channels // 4,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.conv7x7_2 = ConvModule(
self.out_channels // 4,
self.out_channels // 4,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
)
self.conv7x7_3 = ConvModule(
self.out_channels // 4,
self.out_channels // 4,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None,
)
def forward(self, x: torch.Tensor) -> tuple:
conv5x5_1 = self.conv5x5_1(x)
conv5x5 = self.conv5x5_2(conv5x5_1)
conv7x7_2 = self.conv7x7_2(conv5x5_1)
conv7x7 = self.conv7x7_3(conv7x7_2)
return (conv5x5, conv7x7)
class SSHDetModule(BaseModule):
"""This is an implementation of `SSH detection module` described in `SSH:
Single Stage Headless Face Detector.
<https://arxiv.org/pdf/1708.03979.pdf>`_.
Args:
in_channels (int): Number of input channels used at each scale.
out_channels (int): Number of output channels used at each scale.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
layer. Defaults to dict(type='BN').
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
in_channels: int,
out_channels: int,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
init_cfg: OptMultiConfig = None):
super().__init__(init_cfg=init_cfg)
assert out_channels % 4 == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.conv3x3 = ConvModule(
self.in_channels,
self.out_channels // 2,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.context_module = SSHContextModule(
in_channels=self.in_channels,
out_channels=self.out_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
def forward(self, x: torch.Tensor) -> torch.Tensor:
conv3x3 = self.conv3x3(x)
conv5x5, conv7x7 = self.context_module(x)
out = torch.cat([conv3x3, conv5x5, conv7x7], dim=1)
out = F.relu(out)
return out
@MODELS.register_module()
class SSH(BaseModule):
"""`SSH Neck` used in `SSH: Single Stage Headless Face Detector.
<https://arxiv.org/pdf/1708.03979.pdf>`_.
Args:
num_scales (int): The number of scales / stages.
in_channels (list[int]): The number of input channels per scale.
out_channels (list[int]): The number of output channels per scale.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
layer. Defaults to dict(type='BN').
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [8, 16, 32, 64]
>>> out_channels = [16, 32, 64, 128]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = SSH(num_scales=4, in_channels=in_channels,
... out_channels=out_channels)
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 16, 340, 340])
outputs[1].shape = torch.Size([1, 32, 170, 170])
outputs[2].shape = torch.Size([1, 64, 84, 84])
outputs[3].shape = torch.Size([1, 128, 43, 43])
"""
def __init__(self,
num_scales: int,
in_channels: List[int],
out_channels: List[int],
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
init_cfg: OptMultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super().__init__(init_cfg=init_cfg)
assert (num_scales == len(in_channels) == len(out_channels))
self.num_scales = num_scales
self.in_channels = in_channels
self.out_channels = out_channels
for idx in range(self.num_scales):
in_c, out_c = self.in_channels[idx], self.out_channels[idx]
self.add_module(
f'ssh_module{idx}',
SSHDetModule(
in_channels=in_c,
out_channels=out_c,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
def forward(self, inputs: Tuple[torch.Tensor]) -> tuple:
assert len(inputs) == self.num_scales
outs = []
for idx, x in enumerate(inputs):
ssh_module = getattr(self, f'ssh_module{idx}')
out = ssh_module(x)
outs.append(out)
return tuple(outs)
| 7,456 | 33.364055 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/necks/cspnext_pafpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from ..layers import CSPLayer
@MODELS.register_module()
class CSPNeXtPAFPN(BaseModule):
"""Path Aggregation Network with CSPNeXt blocks.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_csp_blocks (int): Number of bottlenecks in CSPLayer.
Defaults to 3.
use_depthwise (bool): Whether to use depthwise separable convolution in
blocks. Defaults to False.
expand_ratio (float): Ratio to adjust the number of channels of the
hidden layer. Default: 0.5
upsample_cfg (dict): Config dict for interpolate layer.
Default: `dict(scale_factor=2, mode='nearest')`
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN')
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish')
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(
self,
in_channels: Sequence[int],
out_channels: int,
num_csp_blocks: int = 3,
use_depthwise: bool = False,
expand_ratio: float = 0.5,
upsample_cfg: ConfigType = dict(scale_factor=2, mode='nearest'),
        conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='Swish'),
init_cfg: OptMultiConfig = dict(
type='Kaiming',
layer='Conv2d',
a=math.sqrt(5),
distribution='uniform',
mode='fan_in',
nonlinearity='leaky_relu')
) -> None:
super().__init__(init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
# build top-down blocks
self.upsample = nn.Upsample(**upsample_cfg)
self.reduce_layers = nn.ModuleList()
self.top_down_blocks = nn.ModuleList()
for idx in range(len(in_channels) - 1, 0, -1):
self.reduce_layers.append(
ConvModule(
in_channels[idx],
in_channels[idx - 1],
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.top_down_blocks.append(
CSPLayer(
in_channels[idx - 1] * 2,
in_channels[idx - 1],
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
use_cspnext_block=True,
expand_ratio=expand_ratio,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
# build bottom-up blocks
self.downsamples = nn.ModuleList()
self.bottom_up_blocks = nn.ModuleList()
for idx in range(len(in_channels) - 1):
self.downsamples.append(
conv(
in_channels[idx],
in_channels[idx],
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.bottom_up_blocks.append(
CSPLayer(
in_channels[idx] * 2,
in_channels[idx + 1],
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
use_cspnext_block=True,
expand_ratio=expand_ratio,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.out_convs = nn.ModuleList()
for i in range(len(in_channels)):
self.out_convs.append(
conv(
in_channels[i],
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:
"""
Args:
inputs (tuple[Tensor]): input features.
Returns:
            tuple[Tensor]: CSPNeXtPAFPN features.
"""
assert len(inputs) == len(self.in_channels)
# top-down path
inner_outs = [inputs[-1]]
for idx in range(len(self.in_channels) - 1, 0, -1):
feat_heigh = inner_outs[0]
feat_low = inputs[idx - 1]
feat_heigh = self.reduce_layers[len(self.in_channels) - 1 - idx](
feat_heigh)
inner_outs[0] = feat_heigh
upsample_feat = self.upsample(feat_heigh)
inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
torch.cat([upsample_feat, feat_low], 1))
inner_outs.insert(0, inner_out)
# bottom-up path
outs = [inner_outs[0]]
for idx in range(len(self.in_channels) - 1):
feat_low = outs[-1]
feat_height = inner_outs[idx + 1]
downsample_feat = self.downsamples[idx](feat_low)
out = self.bottom_up_blocks[idx](
torch.cat([downsample_feat, feat_height], 1))
outs.append(out)
# out convs
for idx, conv in enumerate(self.out_convs):
outs[idx] = conv(outs[idx])
return tuple(outs)
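if __name__ == '__main__':
    import torch
    # Minimal sketch with toy channels and power-of-two resolutions (chosen so
    # the x2 upsampling and stride-2 downsampling of the two paths line up).
    neck = CSPNeXtPAFPN(in_channels=[8, 16, 32], out_channels=8).eval()
    feats = (torch.rand(1, 8, 64, 64), torch.rand(1, 16, 32, 32),
             torch.rand(1, 32, 16, 16))
    for out in neck(feats):
        print(tuple(out.shape))
    # expected: 8 channels at 64, 32 and 16 resolution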
| 6,178 | 35.134503 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/necks/fpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, MultiConfig, OptConfigType
@MODELS.register_module()
class FPN(BaseModule):
r"""Feature Pyramid Network.
This is an implementation of paper `Feature Pyramid Networks for Object
Detection <https://arxiv.org/abs/1612.03144>`_.
Args:
in_channels (list[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Defaults to 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Defaults to -1, which means the
last level.
add_extra_convs (bool | str): If bool, it decides whether to add conv
layers on top of the original feature maps. Defaults to False.
If True, it is equivalent to `add_extra_convs='on_input'`.
If str, it specifies the source feature map of the extra convs.
Only the following options are allowed
- 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- 'on_lateral': Last feature map after lateral convs.
- 'on_output': The last output feature map after fpn convs.
relu_before_extra_convs (bool): Whether to apply relu before the extra
conv. Defaults to False.
no_norm_on_lateral (bool): Whether to apply norm on lateral.
Defaults to False.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer. Defaults to None.
act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
activation layer in ConvModule. Defaults to None.
upsample_cfg (:obj:`ConfigDict` or dict, optional): Config dict
for interpolate layer. Defaults to dict(mode='nearest').
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict]): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = FPN(in_channels, 11, len(in_channels)).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(
self,
in_channels: List[int],
out_channels: int,
num_outs: int,
start_level: int = 0,
end_level: int = -1,
add_extra_convs: Union[bool, str] = False,
relu_before_extra_convs: bool = False,
no_norm_on_lateral: bool = False,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
act_cfg: OptConfigType = None,
upsample_cfg: ConfigType = dict(mode='nearest'),
init_cfg: MultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')
) -> None:
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.fp16_enabled = False
self.upsample_cfg = upsample_cfg.copy()
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
assert isinstance(add_extra_convs, (str, bool))
if isinstance(add_extra_convs, str):
# Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
elif add_extra_convs: # True
self.add_extra_convs = 'on_input'
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
act_cfg=act_cfg,
inplace=False)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if self.add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.add_extra_convs == 'on_input':
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
def forward(self, inputs: Tuple[Tensor]) -> tuple:
"""Forward function.
Args:
inputs (tuple[Tensor]): Features from the upstream network, each
is a 4D-tensor.
Returns:
tuple: Feature maps, each is a 4D-tensor.
"""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
# In some cases, fixing `scale factor` (e.g. 2) is preferred, but
# it cannot co-exist with `size` in `F.interpolate`.
if 'scale_factor' in self.upsample_cfg:
# fix runtime error of "+=" inplace operation in PyTorch 1.10
laterals[i - 1] = laterals[i - 1] + F.interpolate(
laterals[i], **self.upsample_cfg)
else:
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + F.interpolate(
laterals[i], size=prev_shape, **self.upsample_cfg)
# build outputs
# part 1: from original levels
outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.add_extra_convs == 'on_input':
extra_source = inputs[self.backbone_end_level - 1]
elif self.add_extra_convs == 'on_lateral':
extra_source = laterals[-1]
elif self.add_extra_convs == 'on_output':
extra_source = outs[-1]
else:
raise NotImplementedError
outs.append(self.fpn_convs[used_backbone_levels](extra_source))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
| 9,420 | 41.436937 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/necks/nas_fpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell
from mmengine.model import BaseModule, ModuleList
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptConfigType
@MODELS.register_module()
class NASFPN(BaseModule):
"""NAS-FPN.
Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture
for Object Detection <https://arxiv.org/abs/1904.07392>`_
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
stack_times (int): The number of times the pyramid architecture will
be stacked.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Defaults to 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Defaults to -1, which means the
last level.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict]): Initialization config dict.
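    Example:
        A minimal usage sketch; the channel counts, sizes and ``stack_times``
        below are illustrative and assume five output levels, because
        ``forward`` unpacks exactly p3-p7.
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [128, 64, 32, 16]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = NASFPN(in_channels, 8, num_outs=5, stack_times=1).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 8, 128, 128])
        outputs[1].shape = torch.Size([1, 8, 64, 64])
        outputs[2].shape = torch.Size([1, 8, 32, 32])
        outputs[3].shape = torch.Size([1, 8, 16, 16])
        outputs[4].shape = torch.Size([1, 8, 8, 8])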
"""
def __init__(
self,
in_channels: List[int],
out_channels: int,
num_outs: int,
stack_times: int,
start_level: int = 0,
end_level: int = -1,
norm_cfg: OptConfigType = None,
init_cfg: MultiConfig = dict(type='Caffe2Xavier', layer='Conv2d')
) -> None:
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels) # num of input feature levels
self.num_outs = num_outs # num of output feature levels
self.stack_times = stack_times
self.norm_cfg = norm_cfg
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
# add lateral connections
self.lateral_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
norm_cfg=norm_cfg,
act_cfg=None)
self.lateral_convs.append(l_conv)
# add extra downsample layers (stride-2 pooling or conv)
extra_levels = num_outs - self.backbone_end_level + self.start_level
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
extra_conv = ConvModule(
out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
self.extra_downsamples.append(
nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
# add NAS FPN connections
self.fpn_stages = ModuleList()
for _ in range(self.stack_times):
stage = nn.ModuleDict()
# gp(p6, p4) -> p4_1
stage['gp_64_4'] = GlobalPoolingCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p4_1, p4) -> p4_2
stage['sum_44_4'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p4_2, p3) -> p3_out
stage['sum_43_3'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p3_out, p4_2) -> p4_out
stage['sum_34_4'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p5, gp(p4_out, p3_out)) -> p5_out
stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)
stage['sum_55_5'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p7, gp(p5_out, p4_2)) -> p7_out
stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)
stage['sum_77_7'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# gp(p7_out, p5_out) -> p6_out
stage['gp_75_6'] = GlobalPoolingCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
self.fpn_stages.append(stage)
def forward(self, inputs: Tuple[Tensor]) -> tuple:
"""Forward function.
Args:
inputs (tuple[Tensor]): Features from the upstream network, each
is a 4D-tensor.
Returns:
tuple: Feature maps, each is a 4D-tensor.
"""
# build P3-P5
feats = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build P6-P7 on top of P5
for downsample in self.extra_downsamples:
feats.append(downsample(feats[-1]))
p3, p4, p5, p6, p7 = feats
for stage in self.fpn_stages:
# gp(p6, p4) -> p4_1
p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])
# sum(p4_1, p4) -> p4_2
p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])
# sum(p4_2, p3) -> p3_out
p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])
# sum(p3_out, p4_2) -> p4_out
p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])
# sum(p5, gp(p4_out, p3_out)) -> p5_out
p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])
p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])
# sum(p7, gp(p5_out, p4_2)) -> p7_out
p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])
p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])
# gp(p7_out, p5_out) -> p6_out
p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])
return p3, p4, p5, p6, p7
| 6,878 | 38.994186 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/necks/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .cspnext_pafpn import CSPNeXtPAFPN
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .ssh import SSH
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead', 'CSPNeXtPAFPN', 'SSH'
]
| 831 | 31 | 76 |
py
|
ERD
|
ERD-main/mmdet/models/necks/dyhead.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_activation_layer, build_norm_layer
from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d
from mmengine.model import BaseModule, constant_init, normal_init
from mmdet.registry import MODELS
from ..layers import DyReLU
# Reference:
# https://github.com/microsoft/DynamicHead
# https://github.com/jshilong/SEPC
class DyDCNv2(nn.Module):
"""ModulatedDeformConv2d with normalization layer used in DyHead.
This module cannot be configured with `conv_cfg=dict(type='DCNv2')`
because DyHead calculates offset and mask from middle-level feature.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
stride (int | tuple[int], optional): Stride of the convolution.
Default: 1.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: dict(type='GN', num_groups=16, requires_grad=True).
"""
def __init__(self,
in_channels,
out_channels,
stride=1,
norm_cfg=dict(type='GN', num_groups=16, requires_grad=True)):
super().__init__()
self.with_norm = norm_cfg is not None
bias = not self.with_norm
self.conv = ModulatedDeformConv2d(
in_channels, out_channels, 3, stride=stride, padding=1, bias=bias)
if self.with_norm:
self.norm = build_norm_layer(norm_cfg, out_channels)[1]
def forward(self, x, offset, mask):
"""Forward function."""
x = self.conv(x.contiguous(), offset, mask)
if self.with_norm:
x = self.norm(x)
return x
class DyHeadBlock(nn.Module):
"""DyHead Block with three types of attention.
HSigmoid arguments in default act_cfg follow official code, not paper.
https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
zero_init_offset (bool, optional): Whether to use zero init for
`spatial_conv_offset`. Default: True.
act_cfg (dict, optional): Config dict for the last activation layer of
scale-aware attention. Default: dict(type='HSigmoid', bias=3.0,
divisor=6.0).
"""
def __init__(self,
in_channels,
out_channels,
zero_init_offset=True,
act_cfg=dict(type='HSigmoid', bias=3.0, divisor=6.0)):
super().__init__()
self.zero_init_offset = zero_init_offset
# (offset_x, offset_y, mask) * kernel_size_y * kernel_size_x
self.offset_and_mask_dim = 3 * 3 * 3
self.offset_dim = 2 * 3 * 3
self.spatial_conv_high = DyDCNv2(in_channels, out_channels)
self.spatial_conv_mid = DyDCNv2(in_channels, out_channels)
self.spatial_conv_low = DyDCNv2(in_channels, out_channels, stride=2)
self.spatial_conv_offset = nn.Conv2d(
in_channels, self.offset_and_mask_dim, 3, padding=1)
self.scale_attn_module = nn.Sequential(
nn.AdaptiveAvgPool2d(1), nn.Conv2d(out_channels, 1, 1),
nn.ReLU(inplace=True), build_activation_layer(act_cfg))
self.task_attn_module = DyReLU(out_channels)
self._init_weights()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, 0, 0.01)
if self.zero_init_offset:
constant_init(self.spatial_conv_offset, 0)
def forward(self, x):
"""Forward function."""
outs = []
for level in range(len(x)):
# calculate offset and mask of DCNv2 from middle-level feature
offset_and_mask = self.spatial_conv_offset(x[level])
offset = offset_and_mask[:, :self.offset_dim, :, :]
mask = offset_and_mask[:, self.offset_dim:, :, :].sigmoid()
mid_feat = self.spatial_conv_mid(x[level], offset, mask)
sum_feat = mid_feat * self.scale_attn_module(mid_feat)
summed_levels = 1
if level > 0:
low_feat = self.spatial_conv_low(x[level - 1], offset, mask)
sum_feat += low_feat * self.scale_attn_module(low_feat)
summed_levels += 1
if level < len(x) - 1:
# this upsample order is weird, but faster than natural order
# https://github.com/microsoft/DynamicHead/issues/25
high_feat = F.interpolate(
self.spatial_conv_high(x[level + 1], offset, mask),
size=x[level].shape[-2:],
mode='bilinear',
align_corners=True)
sum_feat += high_feat * self.scale_attn_module(high_feat)
summed_levels += 1
outs.append(self.task_attn_module(sum_feat / summed_levels))
return outs
@MODELS.register_module()
class DyHead(BaseModule):
"""DyHead neck consisting of multiple DyHead Blocks.
See `Dynamic Head: Unifying Object Detection Heads with Attentions
<https://arxiv.org/abs/2106.08322>`_ for details.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
num_blocks (int, optional): Number of DyHead Blocks. Default: 6.
zero_init_offset (bool, optional): Whether to use zero init for
`spatial_conv_offset`. Default: True.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
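    Example:
        A rough usage sketch; the channel count and sizes are illustrative,
        and the compiled deform-conv ops from mmcv are assumed to be
        available.
        >>> import torch
        >>> inputs = tuple(torch.rand(1, 16, s, s) for s in [32, 16, 8])
        >>> self = DyHead(in_channels=16, out_channels=16, num_blocks=2)
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 16, 32, 32])
        outputs[1].shape = torch.Size([1, 16, 16, 16])
        outputs[2].shape = torch.Size([1, 16, 8, 8])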
"""
def __init__(self,
in_channels,
out_channels,
num_blocks=6,
zero_init_offset=True,
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_blocks = num_blocks
self.zero_init_offset = zero_init_offset
dyhead_blocks = []
for i in range(num_blocks):
in_channels = self.in_channels if i == 0 else self.out_channels
dyhead_blocks.append(
DyHeadBlock(
in_channels,
self.out_channels,
zero_init_offset=zero_init_offset))
self.dyhead_blocks = nn.Sequential(*dyhead_blocks)
def forward(self, inputs):
"""Forward function."""
assert isinstance(inputs, (tuple, list))
outs = self.dyhead_blocks(inputs)
return tuple(outs)
| 6,859 | 38.425287 | 78 |
py
|
ERD
|
ERD-main/mmdet/models/necks/bfp.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
@MODELS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
    BFP takes multi-level features as inputs, gathers them into a single one,
    then refines the gathered feature and scatters the refined results back to
    the multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
<https://arxiv.org/abs/1904.02701>`_ for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
refine_level (int): Index of integration and refine level of BSF in
multi-level features from bottom to top.
refine_type (str): Type of the refine op, currently support
[None, 'conv', 'non_local'].
conv_cfg (:obj:`ConfigDict` or dict, optional): The config dict for
convolution layers.
norm_cfg (:obj:`ConfigDict` or dict, optional): The config dict for
normalization layers.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or
dict], optional): Initialization config dict.
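    Example:
        A minimal usage sketch; the three 16-channel levels are illustrative.
        >>> import torch
        >>> inputs = tuple(torch.rand(1, 16, s, s) for s in [32, 16, 8])
        >>> self = BFP(in_channels=16, num_levels=3).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 16, 32, 32])
        outputs[1].shape = torch.Size([1, 16, 16, 16])
        outputs[2].shape = torch.Size([1, 16, 8, 8])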
"""
def __init__(
self,
in_channels: int,
num_levels: int,
refine_level: int = 2,
refine_type: str = None,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')
) -> None:
super().__init__(init_cfg=init_cfg)
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2d(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function."""
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
| 4,090 | 35.526786 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/necks/yolo_neck.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
from typing import List, Tuple
import torch
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
class DetectionBlock(BaseModule):
"""Detection block in YOLO neck.
    Let out_channels = n; the DetectionBlock contains:
    Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer.
    The first 6 ConvLayers are formed in the following way:
        1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.
    The Conv2D layer is 1x1x255.
    Some blocks will have a branch after the fifth ConvLayer.
    The input channel is arbitrary (in_channels).
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels: int,
out_channels: int,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN', requires_grad=True),
act_cfg: ConfigType = dict(
type='LeakyReLU', negative_slope=0.1),
init_cfg: OptMultiConfig = None) -> None:
super(DetectionBlock, self).__init__(init_cfg)
double_out_channels = out_channels * 2
# shortcut
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)
self.conv2 = ConvModule(
out_channels, double_out_channels, 3, padding=1, **cfg)
self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)
self.conv4 = ConvModule(
out_channels, double_out_channels, 3, padding=1, **cfg)
self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)
def forward(self, x: Tensor) -> Tensor:
tmp = self.conv1(x)
tmp = self.conv2(tmp)
tmp = self.conv3(tmp)
tmp = self.conv4(tmp)
out = self.conv5(tmp)
return out
@MODELS.register_module()
class YOLOV3Neck(BaseModule):
"""The neck of YOLOV3.
    It can be treated as a simplified version of FPN. It
    takes the results from the Darknet backbone, applies some upsampling and
    concatenation, and finally outputs the detection result.
Note:
The input feats should be from top to bottom.
i.e., from high-lvl to low-lvl
But YOLOV3Neck will process them in reversed order.
i.e., from bottom (high-lvl) to top (low-lvl)
Args:
num_scales (int): The number of scales / stages.
in_channels (List[int]): The number of input channels per scale.
out_channels (List[int]): The number of output channels per scale.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None.
norm_cfg (dict, optional): Dictionary to construct and config norm
layer. Default: dict(type='BN', requires_grad=True)
act_cfg (dict, optional): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
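    Example:
        A small usage sketch; channel counts are illustrative. The last entry
        of ``feats`` is the deepest feature map and has ``in_channels[0]``
        channels, matching how ``forward`` indexes the inputs.
        >>> import torch
        >>> in_channels = [16, 8, 4]
        >>> out_channels = [8, 4, 2]
        >>> feats = (torch.rand(1, 4, 64, 64),
        ...          torch.rand(1, 8, 32, 32),
        ...          torch.rand(1, 16, 16, 16))
        >>> self = YOLOV3Neck(3, in_channels, out_channels).eval()
        >>> outputs = self.forward(feats)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 8, 16, 16])
        outputs[1].shape = torch.Size([1, 4, 32, 32])
        outputs[2].shape = torch.Size([1, 2, 64, 64])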
"""
def __init__(self,
num_scales: int,
in_channels: List[int],
out_channels: List[int],
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN', requires_grad=True),
act_cfg: ConfigType = dict(
type='LeakyReLU', negative_slope=0.1),
init_cfg: OptMultiConfig = None) -> None:
super(YOLOV3Neck, self).__init__(init_cfg)
assert (num_scales == len(in_channels) == len(out_channels))
self.num_scales = num_scales
self.in_channels = in_channels
self.out_channels = out_channels
# shortcut
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
# To support arbitrary scales, the code looks awful, but it works.
# Better solution is welcomed.
self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
for i in range(1, self.num_scales):
in_c, out_c = self.in_channels[i], self.out_channels[i]
inter_c = out_channels[i - 1]
self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg))
# in_c + out_c : High-lvl feats will be cat with low-lvl feats
self.add_module(f'detect{i+1}',
DetectionBlock(in_c + out_c, out_c, **cfg))
    def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
assert len(feats) == self.num_scales
# processed from bottom (high-lvl) to top (low-lvl)
outs = []
out = self.detect1(feats[-1])
outs.append(out)
for i, x in enumerate(reversed(feats[:-1])):
conv = getattr(self, f'conv{i+1}')
tmp = conv(out)
# Cat with low-lvl feats
tmp = F.interpolate(tmp, scale_factor=2)
tmp = torch.cat((tmp, x), 1)
detect = getattr(self, f'detect{i+2}')
out = detect(tmp)
outs.append(out)
return tuple(outs)
| 5,835 | 38.972603 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/necks/channel_mapper.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
@MODELS.register_module()
class ChannelMapper(BaseModule):
"""Channel Mapper to reduce/increase channels of backbone features.
This is used to reduce/increase channels of backbone features.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
kernel_size (int, optional): kernel_size for reducing channels (used
at each scale). Default: 3.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Default: None.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer. Default: None.
act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
activation layer in ConvModule. Default: dict(type='ReLU').
        num_outs (int, optional): Number of output feature maps. There would
            be extra_convs when num_outs is larger than len(in_channels).
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or dict],
optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = ChannelMapper(in_channels, 11, 3).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(
self,
in_channels: List[int],
out_channels: int,
kernel_size: int = 3,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
act_cfg: OptConfigType = dict(type='ReLU'),
num_outs: int = None,
init_cfg: OptMultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')
) -> None:
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, list)
self.extra_convs = None
if num_outs is None:
num_outs = len(in_channels)
self.convs = nn.ModuleList()
for in_channel in in_channels:
self.convs.append(
ConvModule(
in_channel,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
if num_outs > len(in_channels):
self.extra_convs = nn.ModuleList()
for i in range(len(in_channels), num_outs):
if i == len(in_channels):
in_channel = in_channels[-1]
else:
in_channel = out_channels
self.extra_convs.append(
ConvModule(
in_channel,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function."""
assert len(inputs) == len(self.convs)
outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
if self.extra_convs:
for i in range(len(self.extra_convs)):
if i == 0:
outs.append(self.extra_convs[0](inputs[-1]))
else:
outs.append(self.extra_convs[i](outs[-1]))
return tuple(outs)
| 4,262 | 38.841121 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/necks/hrfpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch.utils.checkpoint import checkpoint
from mmdet.registry import MODELS
@MODELS.register_module()
class HRFPN(BaseModule):
"""HRFPN (High Resolution Feature Pyramids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
stride (int): stride of 3x3 convolutional layers
init_cfg (dict or list[dict], optional): Initialization config dict.
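    Example:
        A minimal usage sketch with three input branches whose resolutions
        differ by powers of two; channel counts are illustrative.
        >>> import torch
        >>> in_channels = [4, 8, 16]
        >>> inputs = [torch.rand(1, c, 32 // 2**i, 32 // 2**i)
        ...           for i, c in enumerate(in_channels)]
        >>> self = HRFPN(in_channels, 8).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 8, 32, 32])
        outputs[1].shape = torch.Size([1, 8, 16, 16])
        outputs[2].shape = torch.Size([1, 8, 8, 8])
        outputs[3].shape = torch.Size([1, 8, 4, 4])
        outputs[4].shape = torch.Size([1, 8, 2, 2])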
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False,
stride=1,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
super(HRFPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
act_cfg=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
act_cfg=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
| 3,519 | 33.851485 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/layers/csp_layer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .se_layer import ChannelAttention
class DarknetBottleneck(BaseModule):
"""The basic bottleneck block used in Darknet.
Each ResBlock consists of two ConvModules and the input is added to the
final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.
The first convLayer has filter size of 1x1 and the second one has the
filter size of 3x3.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
        expansion (float): Expand ratio of the hidden channel.
Defaults to 0.5.
add_identity (bool): Whether to add identity to the out.
Defaults to True.
use_depthwise (bool): Whether to use depthwise separable convolution.
Defaults to False.
conv_cfg (dict): Config dict for convolution layer. Defaults to None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Defaults to dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Defaults to dict(type='Swish').
"""
def __init__(self,
in_channels: int,
out_channels: int,
expansion: float = 0.5,
add_identity: bool = True,
use_depthwise: bool = False,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(
type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='Swish'),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
hidden_channels = int(out_channels * expansion)
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
self.conv1 = ConvModule(
in_channels,
hidden_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = conv(
hidden_channels,
out_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.add_identity = \
add_identity and in_channels == out_channels
def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.add_identity:
return out + identity
else:
return out
class CSPNeXtBlock(BaseModule):
"""The basic bottleneck block used in CSPNeXt.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
expansion (float): Expand ratio of the hidden channel. Defaults to 0.5.
add_identity (bool): Whether to add identity to the out. Only works
when in_channels == out_channels. Defaults to True.
use_depthwise (bool): Whether to use depthwise separable convolution.
Defaults to False.
kernel_size (int): The kernel size of the second convolution layer.
Defaults to 5.
conv_cfg (dict): Config dict for convolution layer. Defaults to None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Defaults to dict(type='BN', momentum=0.03, eps=0.001).
act_cfg (dict): Config dict for activation layer.
Defaults to dict(type='SiLU').
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
in_channels: int,
out_channels: int,
expansion: float = 0.5,
add_identity: bool = True,
use_depthwise: bool = False,
kernel_size: int = 5,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(
type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='SiLU'),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
hidden_channels = int(out_channels * expansion)
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
self.conv1 = conv(
in_channels,
hidden_channels,
3,
stride=1,
padding=1,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = DepthwiseSeparableConvModule(
hidden_channels,
out_channels,
kernel_size,
stride=1,
padding=kernel_size // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.add_identity = \
add_identity and in_channels == out_channels
def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.add_identity:
return out + identity
else:
return out
class CSPLayer(BaseModule):
"""Cross Stage Partial Layer.
Args:
in_channels (int): The input channels of the CSP layer.
out_channels (int): The output channels of the CSP layer.
expand_ratio (float): Ratio to adjust the number of channels of the
hidden layer. Defaults to 0.5.
num_blocks (int): Number of blocks. Defaults to 1.
add_identity (bool): Whether to add identity in blocks.
Defaults to True.
use_cspnext_block (bool): Whether to use CSPNeXt block.
Defaults to False.
use_depthwise (bool): Whether to use depthwise separable convolution in
blocks. Defaults to False.
channel_attention (bool): Whether to add channel attention in each
stage. Defaults to True.
conv_cfg (dict, optional): Config dict for convolution layer.
Defaults to None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Defaults to dict(type='BN')
act_cfg (dict): Config dict for activation layer.
Defaults to dict(type='Swish')
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization config dict.
Defaults to None.
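    Example:
        A minimal usage sketch of a single CSP stage; sizes are illustrative.
        >>> import torch
        >>> self = CSPLayer(32, 32, num_blocks=1).eval()
        >>> x = torch.rand(1, 32, 16, 16)
        >>> self(x).shape
        torch.Size([1, 32, 16, 16])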
"""
def __init__(self,
in_channels: int,
out_channels: int,
expand_ratio: float = 0.5,
num_blocks: int = 1,
add_identity: bool = True,
use_depthwise: bool = False,
use_cspnext_block: bool = False,
channel_attention: bool = False,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(
type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='Swish'),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
block = CSPNeXtBlock if use_cspnext_block else DarknetBottleneck
mid_channels = int(out_channels * expand_ratio)
self.channel_attention = channel_attention
self.main_conv = ConvModule(
in_channels,
mid_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.short_conv = ConvModule(
in_channels,
mid_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.final_conv = ConvModule(
2 * mid_channels,
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.blocks = nn.Sequential(*[
block(
mid_channels,
mid_channels,
1.0,
add_identity,
use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg) for _ in range(num_blocks)
])
if channel_attention:
self.attention = ChannelAttention(2 * mid_channels)
def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
x_short = self.short_conv(x)
x_main = self.main_conv(x)
x_main = self.blocks(x_main)
x_final = torch.cat((x_main, x_short), dim=1)
if self.channel_attention:
x_final = self.attention(x_final)
return self.final_conv(x_final)
| 9,136 | 35.991903 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/layers/msdeformattn_pixel_decoder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Conv2d, ConvModule
from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention
from mmengine.model import (BaseModule, ModuleList, caffe2_xavier_init,
normal_init, xavier_init)
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from ..task_modules.prior_generators import MlvlPointGenerator
from .positional_encoding import SinePositionalEncoding
from .transformer import Mask2FormerTransformerEncoder
@MODELS.register_module()
class MSDeformAttnPixelDecoder(BaseModule):
"""Pixel decoder with multi-scale deformable attention.
Args:
in_channels (list[int] | tuple[int]): Number of channels in the
input feature maps.
strides (list[int] | tuple[int]): Output strides of feature from
backbone.
feat_channels (int): Number of channels for feature.
out_channels (int): Number of channels for output.
num_outs (int): Number of output scales.
norm_cfg (:obj:`ConfigDict` or dict): Config for normalization.
Defaults to dict(type='GN', num_groups=32).
act_cfg (:obj:`ConfigDict` or dict): Config for activation.
Defaults to dict(type='ReLU').
encoder (:obj:`ConfigDict` or dict): Config for transformer
encoder. Defaults to None.
positional_encoding (:obj:`ConfigDict` or dict): Config for
transformer encoder position encoding. Defaults to
dict(num_feats=128, normalize=True).
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
in_channels: Union[List[int],
Tuple[int]] = [256, 512, 1024, 2048],
strides: Union[List[int], Tuple[int]] = [4, 8, 16, 32],
feat_channels: int = 256,
out_channels: int = 256,
num_outs: int = 3,
norm_cfg: ConfigType = dict(type='GN', num_groups=32),
act_cfg: ConfigType = dict(type='ReLU'),
encoder: ConfigType = None,
positional_encoding: ConfigType = dict(
num_feats=128, normalize=True),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.strides = strides
self.num_input_levels = len(in_channels)
self.num_encoder_levels = \
encoder.layer_cfg.self_attn_cfg.num_levels
assert self.num_encoder_levels >= 1, \
'num_levels in attn_cfgs must be at least one'
input_conv_list = []
# from top to down (low to high resolution)
for i in range(self.num_input_levels - 1,
self.num_input_levels - self.num_encoder_levels - 1,
-1):
input_conv = ConvModule(
in_channels[i],
feat_channels,
kernel_size=1,
norm_cfg=norm_cfg,
act_cfg=None,
bias=True)
input_conv_list.append(input_conv)
self.input_convs = ModuleList(input_conv_list)
self.encoder = Mask2FormerTransformerEncoder(**encoder)
        self.positional_encoding = SinePositionalEncoding(
            **positional_encoding)
# high resolution to low resolution
self.level_encoding = nn.Embedding(self.num_encoder_levels,
feat_channels)
# fpn-like structure
self.lateral_convs = ModuleList()
self.output_convs = ModuleList()
self.use_bias = norm_cfg is None
# from top to down (low to high resolution)
        # FPN for the remaining features that did not pass through the encoder
for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1,
-1):
lateral_conv = ConvModule(
in_channels[i],
feat_channels,
kernel_size=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=None)
output_conv = ConvModule(
feat_channels,
feat_channels,
kernel_size=3,
stride=1,
padding=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.lateral_convs.append(lateral_conv)
self.output_convs.append(output_conv)
self.mask_feature = Conv2d(
feat_channels, out_channels, kernel_size=1, stride=1, padding=0)
self.num_outs = num_outs
self.point_generator = MlvlPointGenerator(strides)
def init_weights(self) -> None:
"""Initialize weights."""
for i in range(0, self.num_encoder_levels):
xavier_init(
self.input_convs[i].conv,
gain=1,
bias=0,
distribution='uniform')
for i in range(0, self.num_input_levels - self.num_encoder_levels):
caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)
caffe2_xavier_init(self.output_convs[i].conv, bias=0)
caffe2_xavier_init(self.mask_feature, bias=0)
normal_init(self.level_encoding, mean=0, std=1)
for p in self.encoder.parameters():
if p.dim() > 1:
nn.init.xavier_normal_(p)
# init_weights defined in MultiScaleDeformableAttention
for m in self.encoder.layers.modules():
if isinstance(m, MultiScaleDeformableAttention):
m.init_weights()
def forward(self, feats: List[Tensor]) -> Tuple[Tensor, Tensor]:
"""
Args:
feats (list[Tensor]): Feature maps of each level. Each has
shape of (batch_size, c, h, w).
Returns:
tuple: A tuple containing the following:
- mask_feature (Tensor): shape (batch_size, c, h, w).
- multi_scale_features (list[Tensor]): Multi scale \
features, each in shape (batch_size, c, h, w).
"""
# generate padding mask for each level, for each image
batch_size = feats[0].shape[0]
encoder_input_list = []
padding_mask_list = []
level_positional_encoding_list = []
spatial_shapes = []
reference_points_list = []
for i in range(self.num_encoder_levels):
level_idx = self.num_input_levels - i - 1
feat = feats[level_idx]
feat_projected = self.input_convs[i](feat)
h, w = feat.shape[-2:]
# no padding
padding_mask_resized = feat.new_zeros(
(batch_size, ) + feat.shape[-2:], dtype=torch.bool)
            pos_embed = self.positional_encoding(padding_mask_resized)
level_embed = self.level_encoding.weight[i]
level_pos_embed = level_embed.view(1, -1, 1, 1) + pos_embed
# (h_i * w_i, 2)
reference_points = self.point_generator.single_level_grid_priors(
feat.shape[-2:], level_idx, device=feat.device)
# normalize
factor = feat.new_tensor([[w, h]]) * self.strides[level_idx]
reference_points = reference_points / factor
# shape (batch_size, c, h_i, w_i) -> (h_i * w_i, batch_size, c)
feat_projected = feat_projected.flatten(2).permute(0, 2, 1)
level_pos_embed = level_pos_embed.flatten(2).permute(0, 2, 1)
padding_mask_resized = padding_mask_resized.flatten(1)
encoder_input_list.append(feat_projected)
padding_mask_list.append(padding_mask_resized)
level_positional_encoding_list.append(level_pos_embed)
spatial_shapes.append(feat.shape[-2:])
reference_points_list.append(reference_points)
# shape (batch_size, total_num_queries),
        # total_num_queries = sum(h_i * w_i) over all levels
padding_masks = torch.cat(padding_mask_list, dim=1)
# shape (total_num_queries, batch_size, c)
encoder_inputs = torch.cat(encoder_input_list, dim=1)
level_positional_encodings = torch.cat(
level_positional_encoding_list, dim=1)
device = encoder_inputs.device
# shape (num_encoder_levels, 2), from low
# resolution to high resolution
spatial_shapes = torch.as_tensor(
spatial_shapes, dtype=torch.long, device=device)
# shape (0, h_0*w_0, h_0*w_0+h_1*w_1, ...)
level_start_index = torch.cat((spatial_shapes.new_zeros(
(1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
reference_points = torch.cat(reference_points_list, dim=0)
reference_points = reference_points[None, :, None].repeat(
batch_size, 1, self.num_encoder_levels, 1)
        valid_ratios = reference_points.new_ones(
(batch_size, self.num_encoder_levels, 2))
# shape (num_total_queries, batch_size, c)
memory = self.encoder(
query=encoder_inputs,
query_pos=level_positional_encodings,
key_padding_mask=padding_masks,
spatial_shapes=spatial_shapes,
reference_points=reference_points,
level_start_index=level_start_index,
            valid_ratios=valid_ratios)
# (batch_size, c, num_total_queries)
memory = memory.permute(0, 2, 1)
# from low resolution to high resolution
num_queries_per_level = [e[0] * e[1] for e in spatial_shapes]
outs = torch.split(memory, num_queries_per_level, dim=-1)
outs = [
x.reshape(batch_size, -1, spatial_shapes[i][0],
spatial_shapes[i][1]) for i, x in enumerate(outs)
]
for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1,
-1):
x = feats[i]
cur_feat = self.lateral_convs[i](x)
y = cur_feat + F.interpolate(
outs[-1],
size=cur_feat.shape[-2:],
mode='bilinear',
align_corners=False)
y = self.output_convs[i](y)
outs.append(y)
multi_scale_features = outs[:self.num_outs]
mask_feature = self.mask_feature(outs[-1])
return mask_feature, multi_scale_features
| 10,613 | 41.798387 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/layers/dropblock.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
eps = 1e-6
@MODELS.register_module()
class DropBlock(nn.Module):
"""Randomly drop some regions of feature maps.
Please refer to the method proposed in `DropBlock
<https://arxiv.org/abs/1810.12890>`_ for details.
Args:
drop_prob (float): The probability of dropping each block.
block_size (int): The size of dropped blocks.
warmup_iters (int): The drop probability will linearly increase
from `0` to `drop_prob` during the first `warmup_iters` iterations.
Default: 2000.
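    Example:
        A minimal usage sketch; DropBlock is only active in training mode, so
        the module is left in its default ``train`` state here.
        >>> import torch
        >>> self = DropBlock(drop_prob=0.5, block_size=3, warmup_iters=0)
        >>> x = torch.rand(2, 8, 16, 16)
        >>> self(x).shape
        torch.Size([2, 8, 16, 16])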
"""
def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
super(DropBlock, self).__init__()
assert block_size % 2 == 1
assert 0 < drop_prob <= 1
assert warmup_iters >= 0
self.drop_prob = drop_prob
self.block_size = block_size
self.warmup_iters = warmup_iters
self.iter_cnt = 0
def forward(self, x):
"""
Args:
x (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
if not self.training:
return x
self.iter_cnt += 1
N, C, H, W = list(x.shape)
gamma = self._compute_gamma((H, W))
mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1)
mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))
mask = F.pad(mask, [self.block_size // 2] * 4, value=0)
mask = F.max_pool2d(
input=mask,
stride=(1, 1),
kernel_size=(self.block_size, self.block_size),
padding=self.block_size // 2)
mask = 1 - mask
x = x * mask * mask.numel() / (eps + mask.sum())
return x
def _compute_gamma(self, feat_size):
"""Compute the value of gamma according to paper. gamma is the
parameter of bernoulli distribution, which controls the number of
features to drop.
gamma = (drop_prob * fm_area) / (drop_area * keep_area)
Args:
feat_size (tuple[int, int]): The height and width of feature map.
Returns:
float: The value of gamma.
"""
gamma = (self.drop_prob * feat_size[0] * feat_size[1])
gamma /= ((feat_size[0] - self.block_size + 1) *
(feat_size[1] - self.block_size + 1))
gamma /= (self.block_size**2)
factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt /
self.warmup_iters)
return gamma * factor
def extra_repr(self):
return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, '
f'warmup_iters={self.warmup_iters}')
| 2,918 | 32.551724 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/layers/se_layer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmengine.utils import digit_version, is_tuple_of
from torch import Tensor
from mmdet.utils import MultiConfig, OptConfigType, OptMultiConfig
class SELayer(BaseModule):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
``int(channels/ratio)``. Defaults to 16.
conv_cfg (None or dict): Config dict for convolution layer.
Defaults to None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Defaults to (dict(type='ReLU'), dict(type='Sigmoid'))
init_cfg (dict or list[dict], optional): Initialization config dict.
Defaults to None
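    Example:
        A minimal usage sketch; the output keeps the input shape and is a
        channel-wise re-weighting of the input.
        >>> import torch
        >>> self = SELayer(32)
        >>> x = torch.rand(1, 32, 8, 8)
        >>> self(x).shape
        torch.Size([1, 32, 8, 8])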
"""
def __init__(self,
channels: int,
ratio: int = 16,
conv_cfg: OptConfigType = None,
act_cfg: MultiConfig = (dict(type='ReLU'),
dict(type='Sigmoid')),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=int(channels / ratio),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=int(channels / ratio),
out_channels=channels,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x: Tensor) -> Tensor:
"""Forward function for SELayer."""
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
class DyReLU(BaseModule):
"""Dynamic ReLU (DyReLU) module.
See `Dynamic ReLU <https://arxiv.org/abs/2003.10027>`_ for details.
Current implementation is specialized for task-aware attention in DyHead.
HSigmoid arguments in default act_cfg follow DyHead official code.
https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py
Args:
channels (int): The input (and output) channels of DyReLU module.
ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module,
the intermediate channel will be ``int(channels/ratio)``.
Defaults to 4.
conv_cfg (None or dict): Config dict for convolution layer.
Defaults to None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Defaults to (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
divisor=6.0))
init_cfg (dict or list[dict], optional): Initialization config dict.
Defaults to None
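    Example:
        A minimal usage sketch; the output keeps the input shape.
        >>> import torch
        >>> self = DyReLU(32)
        >>> x = torch.rand(1, 32, 8, 8)
        >>> self(x).shape
        torch.Size([1, 32, 8, 8])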
"""
def __init__(self,
channels: int,
ratio: int = 4,
conv_cfg: OptConfigType = None,
act_cfg: MultiConfig = (dict(type='ReLU'),
dict(
type='HSigmoid',
bias=3.0,
divisor=6.0)),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert is_tuple_of(act_cfg, dict)
self.channels = channels
self.expansion = 4 # for a1, b1, a2, b2
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=int(channels / ratio),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=int(channels / ratio),
out_channels=channels * self.expansion,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
coeffs = self.global_avgpool(x)
coeffs = self.conv1(coeffs)
coeffs = self.conv2(coeffs) - 0.5 # value range: [-0.5, 0.5]
a1, b1, a2, b2 = torch.split(coeffs, self.channels, dim=1)
a1 = a1 * 2.0 + 1.0 # [-1.0, 1.0] + 1.0
a2 = a2 * 2.0 # [-1.0, 1.0]
out = torch.max(x * a1 + b1, x * a2 + b2)
return out
class ChannelAttention(BaseModule):
"""Channel attention Module.
Args:
channels (int): The input (and output) channels of the attention layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Defaults to None
"""
def __init__(self, channels: int, init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True)
if digit_version(torch.__version__) < (1, 7, 0):
self.act = nn.Hardsigmoid()
else:
self.act = nn.Hardsigmoid(inplace=True)
def forward(self, x: Tensor) -> Tensor:
"""Forward function for ChannelAttention."""
with torch.cuda.amp.autocast(enabled=False):
out = self.global_avgpool(x)
out = self.fc(out)
out = self.act(out)
return x * out
| 6,523 | 39.02454 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/layers/normed_predictor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
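    Example:
        A minimal usage sketch; it is used like a plain ``nn.Linear`` but with
        normalized weights and inputs scaled by the temperature.
        >>> import torch
        >>> self = NormedLinear(16, 4)
        >>> x = torch.rand(2, 16)
        >>> self(x).shape
        torch.Size([2, 4])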
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self) -> None:
"""Initialize the weights."""
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedLinear`."""
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@MODELS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Defaults to False.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
norm_over_kernel: bool = False,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedConv2d`."""
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
if torch.__version__ >= '1.8':
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
| 3,343 | 32.777778 | 77 |
py
|
ERD
|
ERD-main/mmdet/models/layers/pixel_decoder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Conv2d, ConvModule
from mmengine.model import BaseModule, ModuleList, caffe2_xavier_init
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from .positional_encoding import SinePositionalEncoding
from .transformer import DetrTransformerEncoder
@MODELS.register_module()
class PixelDecoder(BaseModule):
"""Pixel decoder with a structure like fpn.
Args:
in_channels (list[int] | tuple[int]): Number of channels in the
input feature maps.
        feat_channels (int): Number of channels for feature.
        out_channels (int): Number of channels for output.
norm_cfg (:obj:`ConfigDict` or dict): Config for normalization.
Defaults to dict(type='GN', num_groups=32).
act_cfg (:obj:`ConfigDict` or dict): Config for activation.
Defaults to dict(type='ReLU').
        encoder (:obj:`ConfigDict` or dict): Config for transformer
            encoder. Defaults to None.
positional_encoding (:obj:`ConfigDict` or dict): Config for
transformer encoder position encoding. Defaults to
dict(type='SinePositionalEncoding', num_feats=128,
normalize=True).
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
in_channels: Union[List[int], Tuple[int]],
feat_channels: int,
out_channels: int,
norm_cfg: ConfigType = dict(type='GN', num_groups=32),
act_cfg: ConfigType = dict(type='ReLU'),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.num_inputs = len(in_channels)
self.lateral_convs = ModuleList()
self.output_convs = ModuleList()
self.use_bias = norm_cfg is None
for i in range(0, self.num_inputs - 1):
lateral_conv = ConvModule(
in_channels[i],
feat_channels,
kernel_size=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=None)
output_conv = ConvModule(
feat_channels,
feat_channels,
kernel_size=3,
stride=1,
padding=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.lateral_convs.append(lateral_conv)
self.output_convs.append(output_conv)
self.last_feat_conv = ConvModule(
in_channels[-1],
feat_channels,
kernel_size=3,
padding=1,
stride=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.mask_feature = Conv2d(
feat_channels, out_channels, kernel_size=3, stride=1, padding=1)
def init_weights(self) -> None:
"""Initialize weights."""
for i in range(0, self.num_inputs - 2):
caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)
caffe2_xavier_init(self.output_convs[i].conv, bias=0)
caffe2_xavier_init(self.mask_feature, bias=0)
caffe2_xavier_init(self.last_feat_conv, bias=0)
def forward(self, feats: List[Tensor],
batch_img_metas: List[dict]) -> Tuple[Tensor, Tensor]:
"""
Args:
feats (list[Tensor]): Feature maps of each level. Each has
shape of (batch_size, c, h, w).
batch_img_metas (list[dict]): List of image information.
Pass in for creating more accurate padding mask. Not
used here.
Returns:
tuple[Tensor, Tensor]: a tuple containing the following:
- mask_feature (Tensor): Shape (batch_size, c, h, w).
- memory (Tensor): Output of last stage of backbone.\
Shape (batch_size, c, h, w).
"""
y = self.last_feat_conv(feats[-1])
for i in range(self.num_inputs - 2, -1, -1):
x = feats[i]
cur_feat = self.lateral_convs[i](x)
y = cur_feat + \
F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')
y = self.output_convs[i](y)
mask_feature = self.mask_feature(y)
memory = feats[-1]
return mask_feature, memory
@MODELS.register_module()
class TransformerEncoderPixelDecoder(PixelDecoder):
"""Pixel decoder with transormer encoder inside.
Args:
in_channels (list[int] | tuple[int]): Number of channels in the
input feature maps.
        feat_channels (int): Number of channels for feature.
        out_channels (int): Number of channels for output.
norm_cfg (:obj:`ConfigDict` or dict): Config for normalization.
Defaults to dict(type='GN', num_groups=32).
act_cfg (:obj:`ConfigDict` or dict): Config for activation.
Defaults to dict(type='ReLU').
encoder (:obj:`ConfigDict` or dict): Config for transformer encoder.
Defaults to None.
positional_encoding (:obj:`ConfigDict` or dict): Config for
transformer encoder position encoding. Defaults to
dict(num_feats=128, normalize=True).
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
in_channels: Union[List[int], Tuple[int]],
feat_channels: int,
out_channels: int,
norm_cfg: ConfigType = dict(type='GN', num_groups=32),
act_cfg: ConfigType = dict(type='ReLU'),
encoder: ConfigType = None,
positional_encoding: ConfigType = dict(
num_feats=128, normalize=True),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
in_channels=in_channels,
feat_channels=feat_channels,
out_channels=out_channels,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
init_cfg=init_cfg)
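        # The coarsest feature map is fed through the transformer encoder
        # instead of a plain conv, so the parent's `last_feat_conv` is dropped.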
self.last_feat_conv = None
self.encoder = DetrTransformerEncoder(**encoder)
self.encoder_embed_dims = self.encoder.embed_dims
        assert self.encoder_embed_dims == feat_channels, \
            'embed_dims({}) of transformer encoder must equal ' \
            'feat_channels({})'.format(self.encoder_embed_dims, feat_channels)
self.positional_encoding = SinePositionalEncoding(
**positional_encoding)
self.encoder_in_proj = Conv2d(
in_channels[-1], feat_channels, kernel_size=1)
self.encoder_out_proj = ConvModule(
feat_channels,
feat_channels,
kernel_size=3,
stride=1,
padding=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def init_weights(self) -> None:
"""Initialize weights."""
for i in range(0, self.num_inputs - 2):
caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)
caffe2_xavier_init(self.output_convs[i].conv, bias=0)
caffe2_xavier_init(self.mask_feature, bias=0)
caffe2_xavier_init(self.encoder_in_proj, bias=0)
caffe2_xavier_init(self.encoder_out_proj.conv, bias=0)
for p in self.encoder.parameters():
if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, feats: List[Tensor],
batch_img_metas: List[dict]) -> Tuple[Tensor, Tensor]:
"""
Args:
feats (list[Tensor]): Feature maps of each level. Each has
shape of (batch_size, c, h, w).
batch_img_metas (list[dict]): List of image information. Pass in
for creating more accurate padding mask.
Returns:
tuple: a tuple containing the following:
- mask_feature (Tensor): shape (batch_size, c, h, w).
- memory (Tensor): shape (batch_size, c, h, w).
"""
feat_last = feats[-1]
bs, c, h, w = feat_last.shape
input_img_h, input_img_w = batch_img_metas[0]['batch_input_shape']
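        # Mark padded pixels with 1 and valid image pixels with 0, then
        # downsample the mask to the feature resolution so the encoder can
        # ignore padded locations via `key_padding_mask`.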
padding_mask = feat_last.new_ones((bs, input_img_h, input_img_w),
dtype=torch.float32)
for i in range(bs):
img_h, img_w = batch_img_metas[i]['img_shape']
padding_mask[i, :img_h, :img_w] = 0
padding_mask = F.interpolate(
padding_mask.unsqueeze(1),
size=feat_last.shape[-2:],
mode='nearest').to(torch.bool).squeeze(1)
pos_embed = self.positional_encoding(padding_mask)
feat_last = self.encoder_in_proj(feat_last)
# (batch_size, c, h, w) -> (batch_size, num_queries, c)
feat_last = feat_last.flatten(2).permute(0, 2, 1)
pos_embed = pos_embed.flatten(2).permute(0, 2, 1)
# (batch_size, h, w) -> (batch_size, h*w)
padding_mask = padding_mask.flatten(1)
memory = self.encoder(
query=feat_last,
query_pos=pos_embed,
key_padding_mask=padding_mask)
# (batch_size, num_queries, c) -> (batch_size, c, h, w)
memory = memory.permute(0, 2, 1).view(bs, self.encoder_embed_dims, h,
w)
y = self.encoder_out_proj(memory)
for i in range(self.num_inputs - 2, -1, -1):
x = feats[i]
cur_feat = self.lateral_convs[i](x)
y = cur_feat + \
F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')
y = self.output_convs[i](y)
mask_feature = self.mask_feature(y)
return mask_feature, memory
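# A self-contained sketch (not part of the original file) of the
# (B, C, H, W) <-> (B, H*W, C) reshaping that `forward` performs around the
# transformer encoder; the sizes below are illustrative assumptions.
if __name__ == '__main__':
    import torch

    bs, c, h, w = 2, 256, 16, 24
    feat = torch.rand(bs, c, h, w)
    # Flatten the spatial dims into a token axis for the encoder.
    tokens = feat.flatten(2).permute(0, 2, 1)  # (B, H*W, C)
    assert tokens.shape == (bs, h * w, c)
    # Reverse the reshaping to recover the spatial feature map.
    restored = tokens.permute(0, 2, 1).view(bs, c, h, w)
    assert torch.equal(restored, feat)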
| 10,136 | 39.548 | 79 |
py
|
ERD
|
ERD-main/mmdet/models/layers/conv_upsample.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule, ModuleList


class ConvUpsample(BaseModule):
"""ConvUpsample performs 2x upsampling after Conv.
There are several `ConvModule` layers. In the first few layers, upsampling
will be applied after each layer of convolution. The number of upsampling
must be no more than the number of ConvModule layers.
Args:
in_channels (int): Number of channels in the input feature map.
inner_channels (int): Number of channels produced by the convolution.
num_layers (int): Number of convolution layers.
num_upsample (int | optional): Number of upsampling layer. Must be no
more than num_layers. Upsampling will be applied after the first
``num_upsample`` layers of convolution. Default: ``num_layers``.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer. Default: None.
init_cfg (dict): Config dict for initialization. Default: None.
kwargs (key word augments): Other augments used in ConvModule.
"""
def __init__(self,
in_channels,
inner_channels,
num_layers=1,
num_upsample=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
super(ConvUpsample, self).__init__(init_cfg)
if num_upsample is None:
num_upsample = num_layers
assert num_upsample <= num_layers, \
            f'num_upsample({num_upsample}) must be no more than ' \
f'num_layers({num_layers})'
self.num_layers = num_layers
self.num_upsample = num_upsample
self.conv = ModuleList()
for i in range(num_layers):
self.conv.append(
ConvModule(
in_channels,
inner_channels,
3,
padding=1,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
            in_channels = inner_channels

    def forward(self, x):
num_upsample = self.num_upsample
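        # Apply 2x bilinear upsampling after each of the first `num_upsample`
        # conv layers; the remaining layers keep the spatial size unchanged.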
for i in range(self.num_layers):
x = self.conv[i](x)
if num_upsample > 0:
num_upsample -= 1
x = F.interpolate(
x, scale_factor=2, mode='bilinear', align_corners=False)
return x
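# A minimal usage sketch (not part of the original file); the channel counts,
# batch size and spatial sizes below are illustrative assumptions.
if __name__ == '__main__':
    import torch

    m = ConvUpsample(
        in_channels=256, inner_channels=128, num_layers=3, num_upsample=2)
    x = torch.rand(2, 256, 32, 32)
    y = m(x)
    # Upsampling follows the first two of the three conv layers, so 32x32
    # grows to 128x128 while the channel count becomes `inner_channels`.
    assert y.shape == (2, 128, 128, 128)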
| 2,656 | 38.073529 | 78 |
py
|